[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5141 Use HBaseFactoryProvider.getConfigurationFactory to get the config in PhoenixRDD

2019-02-15 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 0a8bf0f  PHOENIX-5141 Use HBaseFactoryProvider.getConfigurationFactory to get the config in PhoenixRDD
0a8bf0f is described below
0a8bf0f is described below

commit 0a8bf0f1ca39fbfd3a2d56b4e96b4182b6d2cb65
Author: Thomas D'Silva 
AuthorDate: Fri Feb 15 16:44:02 2019 -0800

PHOENIX-5141 Use HBaseFactoryProvider.getConfigurationFactory to get the config in PhoenixRDD
---
 .../src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index cca2e6d..34033b7 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -21,6 +21,7 @@ import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.jdbc.PhoenixDriver
 import org.apache.phoenix.mapreduce.PhoenixInputFormat
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil
+import org.apache.phoenix.query.HBaseFactoryProvider
 import org.apache.spark._
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.rdd.RDD
@@ -76,9 +77,7 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
 
   def getPhoenixConfiguration: Configuration = {
 
-// This is just simply not serializable, so don't try, but clone it because
-// PhoenixConfigurationUtil mutates it.
-val config = HBaseConfiguration.create(conf)
+val config = HBaseFactoryProvider.getConfigurationFactory.getConfiguration(conf);
 
PhoenixConfigurationUtil.setInputClass(config, classOf[PhoenixRecordWritable])
 PhoenixConfigurationUtil.setInputTableName(config, table)
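
For context, a minimal Java sketch of the call this commit switches to (the wrapper class name is ours, for illustration): instead of cloning a fresh HBaseConfiguration, PhoenixRDD now asks HBaseFactoryProvider for the registered ConfigurationFactory, so embedding applications and tests can plug in their own configuration source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.phoenix.query.HBaseFactoryProvider;

    public class ConfigFactorySketch {
        // Delegate to whatever ConfigurationFactory the runtime has
        // registered, rather than re-reading hbase-site.xml via
        // HBaseConfiguration.create(conf).
        public static Configuration clientConfig(Configuration base) {
            return HBaseFactoryProvider.getConfigurationFactory().getConfiguration(base);
        }
    }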



[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-5124 Add config to enable PropertyPolicyProvider (addendum)

2019-02-14 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new f2924e5  PHOENIX-5124 Add config to enable PropertyPolicyProvider (addendum)
f2924e5 is described below
f2924e5 is described below

commit f2924e592403131b805b6d9267df6585d29aabad
Author: Thomas D'Silva 
AuthorDate: Thu Feb 14 18:45:12 2019 -0800

PHOENIX-5124 Add config to enable PropertyPolicyProvider (addendum)
---
 .../org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java | 6 ++
 .../src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala| 1 +
 2 files changed, 7 insertions(+)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
index b0ea17b..b81394b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
@@ -47,6 +47,7 @@ import org.apache.phoenix.mapreduce.ImportPreUpsertKeyValueProcessor;
 import org.apache.phoenix.mapreduce.PhoenixInputFormat;
 import org.apache.phoenix.mapreduce.index.IndexScrutinyTool.OutputFormat;
 import org.apache.phoenix.mapreduce.index.IndexScrutinyTool.SourceTable;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
@@ -212,6 +213,11 @@ public final class PhoenixConfigurationUtil {
 Preconditions.checkNotNull(inputQuery);
 configuration.set(SELECT_STATEMENT, inputQuery);
 }
+
+public static void setPropertyPolicyProviderDisabled(final Configuration configuration) {
+Preconditions.checkNotNull(configuration);
+configuration.set(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED, "false");
+}
 
 public static void setSchemaType(Configuration configuration, final SchemaType schemaType) {
 Preconditions.checkNotNull(configuration);
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 7331a5f..cca2e6d 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -82,6 +82,7 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
 
PhoenixConfigurationUtil.setInputClass(config, classOf[PhoenixRecordWritable])
 PhoenixConfigurationUtil.setInputTableName(config, table)
+PhoenixConfigurationUtil.setPropertyPolicyProviderDisabled(config);
 
 if(!columns.isEmpty) {
   PhoenixConfigurationUtil.setSelectColumnNames(config, columns.toArray)
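
A hedged usage sketch of the helper this addendum adds (the job-setup wrapper is ours; only setPropertyPolicyProviderDisabled comes from the commit): a MapReduce-style client can opt out of property-policy evaluation on the configuration it hands to Phoenix, exactly as PhoenixRDD now does.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;

    public class DisablePolicyProviderSketch {
        public static Configuration jobConfig() {
            Configuration conf = HBaseConfiguration.create();
            // Equivalent to conf.set("phoenix.property.policy.provider.enabled", "false")
            PhoenixConfigurationUtil.setPropertyPolicyProviderDisabled(conf);
            return conf;
        }
    }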



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5124 Add config to enable PropertyPolicyProvider (addendum)

2019-02-14 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new c08acfc  PHOENIX-5124 Add config to enable PropertyPolicyProvider (addendum)
c08acfc is described below
c08acfc is described below

commit c08acfc00d9b89cc2e86ffd315a2c3c819566d6f
Author: Thomas D'Silva 
AuthorDate: Thu Feb 14 18:45:12 2019 -0800

PHOENIX-5124 Add config to enable PropertyPolicyProvider (addendum)
---
 .../org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java | 6 ++
 .../src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala| 1 +
 2 files changed, 7 insertions(+)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
index 83e6607..8fa21fe 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
@@ -47,6 +47,7 @@ import org.apache.phoenix.mapreduce.ImportPreUpsertKeyValueProcessor;
 import org.apache.phoenix.mapreduce.PhoenixInputFormat;
 import org.apache.phoenix.mapreduce.index.IndexScrutinyTool.OutputFormat;
 import org.apache.phoenix.mapreduce.index.IndexScrutinyTool.SourceTable;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
@@ -224,6 +225,11 @@ public final class PhoenixConfigurationUtil {
 Preconditions.checkNotNull(inputQuery);
 configuration.set(SELECT_STATEMENT, inputQuery);
 }
+
+public static void setPropertyPolicyProviderDisabled(final Configuration configuration) {
+Preconditions.checkNotNull(configuration);
+configuration.set(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED, "false");
+}
 
 public static void setSchemaType(Configuration configuration, final SchemaType schemaType) {
 Preconditions.checkNotNull(configuration);
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 7331a5f..cca2e6d 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -82,6 +82,7 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
 
PhoenixConfigurationUtil.setInputClass(config, classOf[PhoenixRecordWritable])
 PhoenixConfigurationUtil.setInputTableName(config, table)
+PhoenixConfigurationUtil.setPropertyPolicyProviderDisabled(config);
 
 if(!columns.isEmpty) {
   PhoenixConfigurationUtil.setSelectColumnNames(config, columns.toArray)



[phoenix] branch master updated: PHOENIX-5124 Add config to enable PropertyPolicyProvider (addendum)

2019-02-14 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 5f56a4b  PHOENIX-5124 Add config to enable PropertyPolicyProvider (addendum)
5f56a4b is described below
5f56a4b is described below

commit 5f56a4b993435f6b127d0c561d4a6c458f263e46
Author: Thomas D'Silva 
AuthorDate: Thu Feb 14 18:45:12 2019 -0800

PHOENIX-5124 Add config to enable PropertyPolicyProvider (addendum)
---
 .../org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java | 6 ++
 .../src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala| 1 +
 2 files changed, 7 insertions(+)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
index 5a561ea..8c76bde 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
@@ -48,6 +48,7 @@ import org.apache.phoenix.mapreduce.ImportPreUpsertKeyValueProcessor;
 import org.apache.phoenix.mapreduce.PhoenixInputFormat;
 import org.apache.phoenix.mapreduce.index.IndexScrutinyTool.OutputFormat;
 import org.apache.phoenix.mapreduce.index.IndexScrutinyTool.SourceTable;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
@@ -225,6 +226,11 @@ public final class PhoenixConfigurationUtil {
 Preconditions.checkNotNull(inputQuery);
 configuration.set(SELECT_STATEMENT, inputQuery);
 }
+
+public static void setPropertyPolicyProviderDisabled(final Configuration configuration) {
+Preconditions.checkNotNull(configuration);
+configuration.set(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED, "false");
+}
 
 public static void setSchemaType(Configuration configuration, final SchemaType schemaType) {
 Preconditions.checkNotNull(configuration);
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 7331a5f..cca2e6d 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -82,6 +82,7 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
 
PhoenixConfigurationUtil.setInputClass(config, classOf[PhoenixRecordWritable])
 PhoenixConfigurationUtil.setInputTableName(config, table)
+PhoenixConfigurationUtil.setPropertyPolicyProviderDisabled(config);
 
 if(!columns.isEmpty) {
   PhoenixConfigurationUtil.setSelectColumnNames(config, columns.toArray)



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5124 Add config to enable PropertyPolicyProvider (addendum)

2019-02-14 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 08c46f6  PHOENIX-5124 Add config to enable PropertyPolicyProvider (addendum)
08c46f6 is described below
08c46f6 is described below

commit 08c46f679cf329c5ba21a961c6e293d6cafeecbf
Author: Thomas D'Silva 
AuthorDate: Thu Feb 14 18:45:12 2019 -0800

PHOENIX-5124 Add config to enable PropertyPolicyProvider (addendum)
---
 .../org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java | 6 ++
 .../src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala| 1 +
 2 files changed, 7 insertions(+)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
index b0ea17b..b81394b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
@@ -47,6 +47,7 @@ import org.apache.phoenix.mapreduce.ImportPreUpsertKeyValueProcessor;
 import org.apache.phoenix.mapreduce.PhoenixInputFormat;
 import org.apache.phoenix.mapreduce.index.IndexScrutinyTool.OutputFormat;
 import org.apache.phoenix.mapreduce.index.IndexScrutinyTool.SourceTable;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
@@ -212,6 +213,11 @@ public final class PhoenixConfigurationUtil {
 Preconditions.checkNotNull(inputQuery);
 configuration.set(SELECT_STATEMENT, inputQuery);
 }
+
+public static void setPropertyPolicyProviderDisabled(final Configuration configuration) {
+Preconditions.checkNotNull(configuration);
+configuration.set(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED, "false");
+}
 
 public static void setSchemaType(Configuration configuration, final SchemaType schemaType) {
 Preconditions.checkNotNull(configuration);
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 7331a5f..cca2e6d 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -82,6 +82,7 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
 
PhoenixConfigurationUtil.setInputClass(config, classOf[PhoenixRecordWritable])
 PhoenixConfigurationUtil.setInputTableName(config, table)
+PhoenixConfigurationUtil.setPropertyPolicyProviderDisabled(config);
 
 if(!columns.isEmpty) {
   PhoenixConfigurationUtil.setSelectColumnNames(config, columns.toArray)



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5124 Add config to enable PropertyPolicyProvider

2019-02-14 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new bdb2b66  PHOENIX-5124 Add config to enable PropertyPolicyProvider
bdb2b66 is described below

commit bdb2b66d73683d2f116e130d53fa8978ccda5ccc
Author: Thomas D'Silva 
AuthorDate: Thu Feb 14 16:14:18 2019 -0800

PHOENIX-5124 Add config to enable PropertyPolicyProvider
---
 .../phoenix/end2end/PropertyPolicyProviderIT.java  | 26 --
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  7 --
 .../org/apache/phoenix/query/QueryServices.java|  2 ++
 .../apache/phoenix/query/QueryServicesOptions.java |  2 ++
 .../org/apache/phoenix/util/PropertiesUtil.java| 26 --
 .../phoenix/query/PropertyPolicyProviderTest.java  | 10 +
 6 files changed, 19 insertions(+), 54 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
deleted file mode 100644
index 48508a9..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
+++ /dev/null
@@ -1,26 +0,0 @@
-package org.apache.phoenix.end2end;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.phoenix.mapreduce.util.ConnectionUtil;
-import org.junit.Test;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.Properties;
-
-public class PropertyPolicyProviderIT  extends ParallelStatsDisabledIT {
-
-@Test
-public void testUsingDefaultHBaseConfigs() throws SQLException {
-Configuration config = HBaseConfiguration.create();
-config.set(HConstants.ZOOKEEPER_QUORUM, getUrl());
-Properties properties=new Properties();
-properties.put("allowedProperty","value");
-try(
-Connection conn = ConnectionUtil.getInputConnection(config, properties)
-){}
-}
-
-}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index d74..d668758 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -244,8 +244,11 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
 this.isDescVarLengthRowKeyUpgrade = isDescVarLengthRowKeyUpgrade;
 
 // Filter user provided properties based on property policy, if
-// provided.
-PropertyPolicyProvider.getPropertyPolicy().evaluate(PropertiesUtil.removeStandardHBasePhoenixConfig(info));
+// provided and QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED is true
+if (Boolean.valueOf(info.getProperty(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED,
+String.valueOf(QueryServicesOptions.DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED)))) {
+PropertyPolicyProvider.getPropertyPolicy().evaluate(info);
+}
 
 // Copy so client cannot change
 this.info = info == null ? new Properties() : PropertiesUtil
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 98e2ed3..9168367 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -309,6 +309,8 @@ public interface QueryServices extends SQLCloseable {
 // whether to enable server side RS -> RS calls for upsert select statements
 public static final String ENABLE_SERVER_UPSERT_SELECT ="phoenix.client.enable.server.upsert.select";
 
+public static final String PROPERTY_POLICY_PROVIDER_ENABLED = "phoenix.property.policy.provider.enabled";
+
 // whether to trigger mutations on the server at all (UPSERT/DELETE or DELETE FROM)
 public static final String ENABLE_SERVER_SIDE_MUTATIONS ="phoenix.client.enable.server.mutations";
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 816d76f..5a8a1b6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -349,6 +349,8 @@ public class QueryServicesOptions {
 
 public static final boolean DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK = false;
 
+public static final boolean DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED = true;
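
As a hedged client-side illustration of the new flag (the JDBC URL and wrapper class are placeholders, not from the commit): a client that wants policy evaluation skipped can set the property on its connection info, which PhoenixConnection now checks before calling the PropertyPolicyProvider.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.util.Properties;

    public class DisablePolicyOnConnectSketch {
        public static void main(String[] args) throws SQLException {
            Properties info = new Properties();
            // Key string is QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED
            info.setProperty("phoenix.property.policy.provider.enabled", "false");
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", info)) {
                // PhoenixConnection skips PropertyPolicyProvider evaluation
                // of these properties when the flag is false.
            }
        }
    }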

[phoenix] branch master updated: PHOENIX-5124 Add config to enable PropertyPolicyProvider

2019-02-14 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new f65e570  PHOENIX-5124 Add config to enable PropertyPolicyProvider
f65e570 is described below

commit f65e570089a4a797009a82d748ffe97284b22a0f
Author: Thomas D'Silva 
AuthorDate: Thu Feb 14 16:14:18 2019 -0800

PHOENIX-5124 Add config to enable PropertyPolicyProvider
---
 .../phoenix/end2end/PropertyPolicyProviderIT.java  | 26 --
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  7 --
 .../org/apache/phoenix/query/QueryServices.java|  2 ++
 .../apache/phoenix/query/QueryServicesOptions.java |  2 ++
 .../org/apache/phoenix/util/PropertiesUtil.java| 26 --
 .../phoenix/query/PropertyPolicyProviderTest.java  | 10 +
 6 files changed, 19 insertions(+), 54 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
deleted file mode 100644
index 48508a9..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
+++ /dev/null
@@ -1,26 +0,0 @@
-package org.apache.phoenix.end2end;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.phoenix.mapreduce.util.ConnectionUtil;
-import org.junit.Test;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.Properties;
-
-public class PropertyPolicyProviderIT  extends ParallelStatsDisabledIT {
-
-@Test
-public void testUsingDefaultHBaseConfigs() throws SQLException {
-Configuration config = HBaseConfiguration.create();
-config.set(HConstants.ZOOKEEPER_QUORUM, getUrl());
-Properties properties=new Properties();
-properties.put("allowedProperty","value");
-try(
-Connection conn = ConnectionUtil.getInputConnection(config, properties)
-){}
-}
-
-}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index d74..d668758 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -244,8 +244,11 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
 this.isDescVarLengthRowKeyUpgrade = isDescVarLengthRowKeyUpgrade;
 
 // Filter user provided properties based on property policy, if
-// provided.
-PropertyPolicyProvider.getPropertyPolicy().evaluate(PropertiesUtil.removeStandardHBasePhoenixConfig(info));
+// provided and QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED is true
+if (Boolean.valueOf(info.getProperty(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED,
+String.valueOf(QueryServicesOptions.DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED)))) {
+PropertyPolicyProvider.getPropertyPolicy().evaluate(info);
+}
 
 // Copy so client cannot change
 this.info = info == null ? new Properties() : PropertiesUtil
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index fc11539..c21a785 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -309,6 +309,8 @@ public interface QueryServices extends SQLCloseable {
 // whether to enable server side RS -> RS calls for upsert select statements
 public static final String ENABLE_SERVER_UPSERT_SELECT ="phoenix.client.enable.server.upsert.select";
 
+public static final String PROPERTY_POLICY_PROVIDER_ENABLED = "phoenix.property.policy.provider.enabled";
+
 // whether to trigger mutations on the server at all (UPSERT/DELETE or DELETE FROM)
 public static final String ENABLE_SERVER_SIDE_MUTATIONS ="phoenix.client.enable.server.mutations";
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index e71e531..adcc784 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -350,6 +350,8 @@ public class QueryServicesOptions {
 
 public static final boolean DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK = false;
 
+public static final boolean DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED = true;

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5124 Add config to enable PropertyPolicyProvider

2019-02-14 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 1872a60  PHOENIX-5124 Add config to enable PropertyPolicyProvider
1872a60 is described below

commit 1872a605cf71656c45764951b8157c49ae4c2086
Author: Thomas D'Silva 
AuthorDate: Thu Feb 14 16:14:18 2019 -0800

PHOENIX-5124 Add config to enable PropertyPolicyProvider
---
 .../phoenix/end2end/PropertyPolicyProviderIT.java  | 26 --
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  7 --
 .../org/apache/phoenix/query/QueryServices.java|  2 ++
 .../apache/phoenix/query/QueryServicesOptions.java |  2 ++
 .../org/apache/phoenix/util/PropertiesUtil.java| 26 --
 .../phoenix/query/PropertyPolicyProviderTest.java  | 10 +
 6 files changed, 19 insertions(+), 54 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
deleted file mode 100644
index 48508a9..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
+++ /dev/null
@@ -1,26 +0,0 @@
-package org.apache.phoenix.end2end;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.phoenix.mapreduce.util.ConnectionUtil;
-import org.junit.Test;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.Properties;
-
-public class PropertyPolicyProviderIT  extends ParallelStatsDisabledIT {
-
-@Test
-public void testUsingDefaultHBaseConfigs() throws SQLException {
-Configuration config = HBaseConfiguration.create();
-config.set(HConstants.ZOOKEEPER_QUORUM, getUrl());
-Properties properties=new Properties();
-properties.put("allowedProperty","value");
-try(
-Connection conn = ConnectionUtil.getInputConnection(config, properties)
-){}
-}
-
-}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index d74..d668758 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -244,8 +244,11 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
 this.isDescVarLengthRowKeyUpgrade = isDescVarLengthRowKeyUpgrade;
 
 // Filter user provided properties based on property policy, if
-// provided.
-PropertyPolicyProvider.getPropertyPolicy().evaluate(PropertiesUtil.removeStandardHBasePhoenixConfig(info));
+// provided and QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED is true
+if (Boolean.valueOf(info.getProperty(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED,
+String.valueOf(QueryServicesOptions.DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED)))) {
+PropertyPolicyProvider.getPropertyPolicy().evaluate(info);
+}
 
 // Copy so client cannot change
 this.info = info == null ? new Properties() : PropertiesUtil
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index fc11539..c21a785 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -309,6 +309,8 @@ public interface QueryServices extends SQLCloseable {
 // whether to enable server side RS -> RS calls for upsert select statements
 public static final String ENABLE_SERVER_UPSERT_SELECT ="phoenix.client.enable.server.upsert.select";
 
+public static final String PROPERTY_POLICY_PROVIDER_ENABLED = "phoenix.property.policy.provider.enabled";
+
 // whether to trigger mutations on the server at all (UPSERT/DELETE or DELETE FROM)
 public static final String ENABLE_SERVER_SIDE_MUTATIONS ="phoenix.client.enable.server.mutations";
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index fcf57c7..684e955 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -351,6 +351,8 @@ public class QueryServicesOptions {
 
 public static final boolean DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK = false;
 
+public static final boolean DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED = true;

[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-5124 Add config to enable PropertyPolicyProvider

2019-02-14 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new 4292ea9  PHOENIX-5124 Add config to enable PropertyPolicyProvider
4292ea9 is described below

commit 4292ea976e43e0368967aa7a2e4d45dad48e4c09
Author: Thomas D'Silva 
AuthorDate: Thu Feb 14 16:14:18 2019 -0800

PHOENIX-5124 Add config to enable PropertyPolicyProvider
---
 .../phoenix/end2end/PropertyPolicyProviderIT.java  | 26 --
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  7 --
 .../org/apache/phoenix/query/QueryServices.java|  2 ++
 .../apache/phoenix/query/QueryServicesOptions.java |  2 ++
 .../org/apache/phoenix/util/PropertiesUtil.java| 26 --
 .../phoenix/query/PropertyPolicyProviderTest.java  | 10 +
 6 files changed, 19 insertions(+), 54 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
deleted file mode 100644
index 48508a9..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
+++ /dev/null
@@ -1,26 +0,0 @@
-package org.apache.phoenix.end2end;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.phoenix.mapreduce.util.ConnectionUtil;
-import org.junit.Test;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.Properties;
-
-public class PropertyPolicyProviderIT  extends ParallelStatsDisabledIT {
-
-@Test
-public void testUsingDefaultHBaseConfigs() throws SQLException {
-Configuration config = HBaseConfiguration.create();
-config.set(HConstants.ZOOKEEPER_QUORUM, getUrl());
-Properties properties=new Properties();
-properties.put("allowedProperty","value");
-try(
-Connection conn = ConnectionUtil.getInputConnection(config, properties)
-){}
-}
-
-}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index d74..d668758 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -244,8 +244,11 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
 this.isDescVarLengthRowKeyUpgrade = isDescVarLengthRowKeyUpgrade;
 
 // Filter user provided properties based on property policy, if
-// provided.
-PropertyPolicyProvider.getPropertyPolicy().evaluate(PropertiesUtil.removeStandardHBasePhoenixConfig(info));
+// provided and QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED is true
+if (Boolean.valueOf(info.getProperty(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED,
+String.valueOf(QueryServicesOptions.DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED)))) {
+PropertyPolicyProvider.getPropertyPolicy().evaluate(info);
+}
 
 // Copy so client cannot change
 this.info = info == null ? new Properties() : PropertiesUtil
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 779dc95..a7a8f04 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -309,6 +309,8 @@ public interface QueryServices extends SQLCloseable {
 // whether to enable server side RS -> RS calls for upsert select statements
 public static final String ENABLE_SERVER_UPSERT_SELECT ="phoenix.client.enable.server.upsert.select";
 
+public static final String PROPERTY_POLICY_PROVIDER_ENABLED = "phoenix.property.policy.provider.enabled";
+
 // whether to trigger mutations on the server at all (UPSERT/DELETE or DELETE FROM)
 public static final String ENABLE_SERVER_SIDE_MUTATIONS ="phoenix.client.enable.server.mutations";
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 54ca660..bd49bb6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -349,6 +349,8 @@ public class QueryServicesOptions {
 
 public static final boolean DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK = false;
 
+public static final boolean DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED = true;

[phoenix] branch master updated: PHOENIX-5126 RegionScanner leak leading to store files not getting cleared

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new b4d2745  PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
b4d2745 is described below

commit b4d27456a271054fb7e56e61b53aa78a222b31f4
Author: Abhishek Singh Chouhan 
AuthorDate: Thu Feb 7 13:51:15 2019 -0800

PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
---
 .../phoenix/hbase/index/covered/data/LocalTable.java  | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
index 85c54ce..402620f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
@@ -70,14 +70,15 @@ public class LocalTable implements LocalHBaseState {
 s.setTimeRange(0,ts);
 }
 Region region = this.env.getRegion();
-RegionScanner scanner = region.getScanner(s);
-List<Cell> kvs = new ArrayList<Cell>(1);
-boolean more = scanner.next(kvs);
-assert !more : "Got more than one result when scanning" + " a single row in the primary table!";
+try (RegionScanner scanner = region.getScanner(s)) {
+  List<Cell> kvs = new ArrayList<Cell>(1);
+  boolean more = scanner.next(kvs);
+  assert !more : "Got more than one result when scanning"
+  + " a single row in the primary table!";
 
-Result r = Result.create(kvs);
-scanner.close();
-return r;
+  Result r = Result.create(kvs);
+  return r;
+}
   }
 
 // Returns the smallest timestamp in the given cell lists.
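
The essence of the fix is the try-with-resources block: close() now runs even when scanner.next() throws, so the RegionScanner (and the store-file references it pins) cannot leak. A self-contained sketch of the same pattern, with a stand-in class of our own in place of RegionScanner:

    // MyScanner stands in for RegionScanner: an AutoCloseable whose read
    // path can fail partway through.
    class MyScanner implements AutoCloseable {
        boolean next() { throw new RuntimeException("read failure"); }
        @Override public void close() { System.out.println("closed"); }
    }

    public class TryWithResourcesDemo {
        public static void main(String[] args) {
            try (MyScanner scanner = new MyScanner()) {
                scanner.next(); // throws...
            } catch (RuntimeException e) {
                // ...but close() has already run by the time we get here,
                // which is what the old hand-rolled scanner.close() missed.
                System.out.println("caught: " + e.getMessage());
            }
        }
    }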



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5126 RegionScanner leak leading to store files not getting cleared

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 8cf2c9e  PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
8cf2c9e is described below

commit 8cf2c9ea504750adf349e411a6fd9071a7e50a02
Author: Abhishek Singh Chouhan 
AuthorDate: Thu Feb 7 13:51:15 2019 -0800

PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
---
 .../phoenix/hbase/index/covered/data/LocalTable.java  | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
index 85c54ce..402620f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
@@ -70,14 +70,15 @@ public class LocalTable implements LocalHBaseState {
 s.setTimeRange(0,ts);
 }
 Region region = this.env.getRegion();
-RegionScanner scanner = region.getScanner(s);
-List<Cell> kvs = new ArrayList<Cell>(1);
-boolean more = scanner.next(kvs);
-assert !more : "Got more than one result when scanning" + " a single row in the primary table!";
+try (RegionScanner scanner = region.getScanner(s)) {
+  List<Cell> kvs = new ArrayList<Cell>(1);
+  boolean more = scanner.next(kvs);
+  assert !more : "Got more than one result when scanning"
+  + " a single row in the primary table!";
 
-Result r = Result.create(kvs);
-scanner.close();
-return r;
+  Result r = Result.create(kvs);
+  return r;
+}
   }
 
 // Returns the smallest timestamp in the given cell lists.



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5126 RegionScanner leak leading to store files not getting cleared

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new fc3bf8e  PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
fc3bf8e is described below

commit fc3bf8ea29b6a0e13ec1a974e0e520b94fbae3bb
Author: Abhishek Singh Chouhan 
AuthorDate: Thu Feb 7 13:51:15 2019 -0800

PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
---
 .../phoenix/hbase/index/covered/data/LocalTable.java  | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
index 85c54ce..402620f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
@@ -70,14 +70,15 @@ public class LocalTable implements LocalHBaseState {
 s.setTimeRange(0,ts);
 }
 Region region = this.env.getRegion();
-RegionScanner scanner = region.getScanner(s);
-List<Cell> kvs = new ArrayList<Cell>(1);
-boolean more = scanner.next(kvs);
-assert !more : "Got more than one result when scanning" + " a single row in the primary table!";
+try (RegionScanner scanner = region.getScanner(s)) {
+  List<Cell> kvs = new ArrayList<Cell>(1);
+  boolean more = scanner.next(kvs);
+  assert !more : "Got more than one result when scanning"
+  + " a single row in the primary table!";
 
-Result r = Result.create(kvs);
-scanner.close();
-return r;
+  Result r = Result.create(kvs);
+  return r;
+}
   }
 
 // Returns the smallest timestamp in the given cell lists.



[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-5126 RegionScanner leak leading to store files not getting cleared

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new f7c8485  PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
f7c8485 is described below

commit f7c848510b019b018dfa34bc8360ea573af6b052
Author: Abhishek Singh Chouhan 
AuthorDate: Thu Feb 7 13:51:15 2019 -0800

PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
---
 .../phoenix/hbase/index/covered/data/LocalTable.java  | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
index 85c54ce..402620f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
@@ -70,14 +70,15 @@ public class LocalTable implements LocalHBaseState {
 s.setTimeRange(0,ts);
 }
 Region region = this.env.getRegion();
-RegionScanner scanner = region.getScanner(s);
-List<Cell> kvs = new ArrayList<Cell>(1);
-boolean more = scanner.next(kvs);
-assert !more : "Got more than one result when scanning" + " a single row in the primary table!";
+try (RegionScanner scanner = region.getScanner(s)) {
+  List<Cell> kvs = new ArrayList<Cell>(1);
+  boolean more = scanner.next(kvs);
+  assert !more : "Got more than one result when scanning"
+  + " a single row in the primary table!";
 
-Result r = Result.create(kvs);
-scanner.close();
-return r;
+  Result r = Result.create(kvs);
+  return r;
+}
   }
 
 // Returns the smallest timestamp in the given cell lists.



[phoenix] branch 4.14-HBase-1.3 updated: PHOENIX-5126 RegionScanner leak leading to store files not getting cleared

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new 72cdf3a  PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
72cdf3a is described below

commit 72cdf3ae116ac067c03ceed603958fe938008fe1
Author: Abhishek Singh Chouhan 
AuthorDate: Thu Feb 7 13:51:15 2019 -0800

PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
---
 .../phoenix/hbase/index/covered/data/LocalTable.java  | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
index 85c54ce..402620f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
@@ -70,14 +70,15 @@ public class LocalTable implements LocalHBaseState {
 s.setTimeRange(0,ts);
 }
 Region region = this.env.getRegion();
-RegionScanner scanner = region.getScanner(s);
-List<Cell> kvs = new ArrayList<Cell>(1);
-boolean more = scanner.next(kvs);
-assert !more : "Got more than one result when scanning" + " a single row in the primary table!";
+try (RegionScanner scanner = region.getScanner(s)) {
+  List<Cell> kvs = new ArrayList<Cell>(1);
+  boolean more = scanner.next(kvs);
+  assert !more : "Got more than one result when scanning"
+  + " a single row in the primary table!";
 
-Result r = Result.create(kvs);
-scanner.close();
-return r;
+  Result r = Result.create(kvs);
+  return r;
+}
   }
 
 // Returns the smallest timestamp in the given cell lists.



[phoenix] branch 4.14-HBase-1.2 updated: PHOENIX-5126 RegionScanner leak leading to store files not getting cleared

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.2 by this push:
 new 32ed771  PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
32ed771 is described below

commit 32ed7711b97b5b98cc1e7fdaaedfce966c7c26b1
Author: Abhishek Singh Chouhan 
AuthorDate: Thu Feb 7 13:51:15 2019 -0800

PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
---
 .../phoenix/hbase/index/covered/data/LocalTable.java  | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
index 85c54ce..402620f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
@@ -70,14 +70,15 @@ public class LocalTable implements LocalHBaseState {
 s.setTimeRange(0,ts);
 }
 Region region = this.env.getRegion();
-RegionScanner scanner = region.getScanner(s);
-List<Cell> kvs = new ArrayList<Cell>(1);
-boolean more = scanner.next(kvs);
-assert !more : "Got more than one result when scanning" + " a single row in the primary table!";
+try (RegionScanner scanner = region.getScanner(s)) {
+  List<Cell> kvs = new ArrayList<Cell>(1);
+  boolean more = scanner.next(kvs);
+  assert !more : "Got more than one result when scanning"
+  + " a single row in the primary table!";
 
-Result r = Result.create(kvs);
-scanner.close();
-return r;
+  Result r = Result.create(kvs);
+  return r;
+}
   }
 
 // Returns the smallest timestamp in the given cell lists.



[phoenix] branch 4.14-HBase-1.4 updated: PHOENIX-5126 RegionScanner leak leading to store files not getting cleared

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new 393d972  PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
393d972 is described below

commit 393d972eddc860fc2fec0b7da5a4fa80be905f2c
Author: Abhishek Singh Chouhan 
AuthorDate: Thu Feb 7 13:51:15 2019 -0800

PHOENIX-5126 RegionScanner leak leading to store files not getting cleared
---
 .../phoenix/hbase/index/covered/data/LocalTable.java  | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
index 85c54ce..402620f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
@@ -70,14 +70,15 @@ public class LocalTable implements LocalHBaseState {
 s.setTimeRange(0,ts);
 }
 Region region = this.env.getRegion();
-RegionScanner scanner = region.getScanner(s);
-List<Cell> kvs = new ArrayList<Cell>(1);
-boolean more = scanner.next(kvs);
-assert !more : "Got more than one result when scanning" + " a single row in the primary table!";
+try (RegionScanner scanner = region.getScanner(s)) {
+  List<Cell> kvs = new ArrayList<Cell>(1);
+  boolean more = scanner.next(kvs);
+  assert !more : "Got more than one result when scanning"
+  + " a single row in the primary table!";
 
-Result r = Result.create(kvs);
-scanner.close();
-return r;
+  Result r = Result.create(kvs);
+  return r;
+}
   }
 
 // Returns the smallest timestamp in the given cell lists.



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties (addendum)

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 8935a39  PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties (addendum)
8935a39 is described below

commit 8935a39b7477ca6dd33851b5d87cb2ef3cff9e5d
Author: Thomas D'Silva 
AuthorDate: Thu Feb 7 18:15:12 2019 -0800

PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties (addendum)
---
 .../src/main/java/org/apache/phoenix/util/PropertiesUtil.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
index b029a26..a52d979 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
@@ -76,6 +76,7 @@ public class PropertiesUtil {
 
 /**
 * Removes properties present that are present in standard HBase configuration and standard Phoenix properties
+ * These are then evaluated by the PropertyPolicyProvider.
 */
 public static Properties removeStandardHBasePhoenixConfig(Properties props) {
 Configuration config = HBaseConfiguration.create();
@@ -83,10 +84,12 @@ public class PropertiesUtil {
 for(Entry<Object, Object> entry: props.entrySet()) {
 if ( entry.getKey() instanceof String) {
 String propName = (String) entry.getKey();
-if (config.get(propName) == null
-&& PhoenixEmbeddedDriver.DEFAULT_PROPS.get(propName) == null
+// add the property to the normalized list if its not a standard Phoenix property and
+// if the property is not defined in hbase-site.xml or if it is defined and its value is different
+if ( PhoenixEmbeddedDriver.DEFAULT_PROPS.get(propName) == null
 && !propName.equals(PhoenixRuntime.CURRENT_SCN_ATTRIB)
-&& !propName.equals(PhoenixRuntime.TENANT_ID_ATTRIB)) {
+&& !propName.equals(PhoenixRuntime.TENANT_ID_ATTRIB)
+&& (config.get(propName) == null || !config.get(propName).equals(entry.getValue()) )) {
 normalizedProps.put(propName, props.getProperty(propName));
 }
 }
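
A hedged restatement of the new filter as a standalone predicate (class and method names are ours, and the CURRENT_SCN/TENANT_ID special cases are omitted for brevity): a user property is still policy-evaluated only if it is not a standard driver property and either is absent from the HBase configuration or overrides it with a different value.

    import java.util.Properties;
    import org.apache.hadoop.conf.Configuration;

    public class PropertyFilterSketch {
        // driverDefaults plays the role of PhoenixEmbeddedDriver.DEFAULT_PROPS.
        static boolean shouldEvaluate(Configuration config, Properties driverDefaults,
                                      String name, String value) {
            return driverDefaults.get(name) == null
                    && (config.get(name) == null || !config.get(name).equals(value));
        }
    }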



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties (addendum)

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new cb557f2  PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties (addendum)
cb557f2 is described below

commit cb557f2930909518fbebdd82bcfab9f81c39264d
Author: Thomas D'Silva 
AuthorDate: Thu Feb 7 18:15:12 2019 -0800

PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties (addendum)
---
 .../src/main/java/org/apache/phoenix/util/PropertiesUtil.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
index b029a26..a52d979 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
@@ -76,6 +76,7 @@ public class PropertiesUtil {
 
 /**
 * Removes properties present that are present in standard HBase configuration and standard Phoenix properties
+ * These are then evaluated by the PropertyPolicyProvider.
 */
 public static Properties removeStandardHBasePhoenixConfig(Properties props) {
 Configuration config = HBaseConfiguration.create();
@@ -83,10 +84,12 @@ public class PropertiesUtil {
 for(Entry<Object, Object> entry: props.entrySet()) {
 if ( entry.getKey() instanceof String) {
 String propName = (String) entry.getKey();
-if (config.get(propName) == null
-&& PhoenixEmbeddedDriver.DEFAULT_PROPS.get(propName) == null
+// add the property to the normalized list if its not a standard Phoenix property and
+// if the property is not defined in hbase-site.xml or if it is defined and its value is different
+if ( PhoenixEmbeddedDriver.DEFAULT_PROPS.get(propName) == null
 && !propName.equals(PhoenixRuntime.CURRENT_SCN_ATTRIB)
-&& !propName.equals(PhoenixRuntime.TENANT_ID_ATTRIB)) {
+&& !propName.equals(PhoenixRuntime.TENANT_ID_ATTRIB)
+&& (config.get(propName) == null || !config.get(propName).equals(entry.getValue()) )) {
 normalizedProps.put(propName, props.getProperty(propName));
 }
 }



[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties (addendum)

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new 2ba428d  PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties (addendum)
2ba428d is described below

commit 2ba428d89892dfd8cfd3a63e9d50a88146e88a71
Author: Thomas D'Silva 
AuthorDate: Thu Feb 7 18:15:12 2019 -0800

PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties (addendum)
---
 .../src/main/java/org/apache/phoenix/util/PropertiesUtil.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
index b029a26..a52d979 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
@@ -76,6 +76,7 @@ public class PropertiesUtil {
 
 /**
 * Removes properties present that are present in standard HBase configuration and standard Phoenix properties
+ * These are then evaluated by the PropertyPolicyProvider.
 */
 public static Properties removeStandardHBasePhoenixConfig(Properties props) {
 Configuration config = HBaseConfiguration.create();
@@ -83,10 +84,12 @@ public class PropertiesUtil {
 for(Entry<Object, Object> entry: props.entrySet()) {
 if ( entry.getKey() instanceof String) {
 String propName = (String) entry.getKey();
-if (config.get(propName) == null
-&& PhoenixEmbeddedDriver.DEFAULT_PROPS.get(propName) == null
+// add the property to the normalized list if its not a standard Phoenix property and
+// if the property is not defined in hbase-site.xml or if it is defined and its value is different
+if ( PhoenixEmbeddedDriver.DEFAULT_PROPS.get(propName) == null
 && !propName.equals(PhoenixRuntime.CURRENT_SCN_ATTRIB)
-&& !propName.equals(PhoenixRuntime.TENANT_ID_ATTRIB)) {
+&& !propName.equals(PhoenixRuntime.TENANT_ID_ATTRIB)
+&& (config.get(propName) == null || !config.get(propName).equals(entry.getValue()) )) {
 normalizedProps.put(propName, props.getProperty(propName));
 }
 }



[phoenix] branch master updated: PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties (addendum)

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new b5fb4c4  PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties (addendum)
b5fb4c4 is described below

commit b5fb4c47d4a7ef52a669f46f8606fdcd0f795214
Author: Thomas D'Silva 
AuthorDate: Thu Feb 7 18:15:12 2019 -0800

PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties (addendum)
---
 .../src/main/java/org/apache/phoenix/util/PropertiesUtil.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
index b029a26..a52d979 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
@@ -76,6 +76,7 @@ public class PropertiesUtil {
 
 /**
 * Removes properties present that are present in standard HBase configuration and standard Phoenix properties
+ * These are then evaluated by the PropertyPolicyProvider.
 */
 public static Properties removeStandardHBasePhoenixConfig(Properties props) {
 Configuration config = HBaseConfiguration.create();
@@ -83,10 +84,12 @@ public class PropertiesUtil {
 for(Entry<Object, Object> entry: props.entrySet()) {
 if ( entry.getKey() instanceof String) {
 String propName = (String) entry.getKey();
-if (config.get(propName) == null
-&& PhoenixEmbeddedDriver.DEFAULT_PROPS.get(propName) == null
+// add the property to the normalized list if its not a standard Phoenix property and
+// if the property is not defined in hbase-site.xml or if it is defined and its value is different
+if ( PhoenixEmbeddedDriver.DEFAULT_PROPS.get(propName) == null
 && !propName.equals(PhoenixRuntime.CURRENT_SCN_ATTRIB)
-&& !propName.equals(PhoenixRuntime.TENANT_ID_ATTRIB)) {
+&& !propName.equals(PhoenixRuntime.TENANT_ID_ATTRIB)
+&& (config.get(propName) == null || !config.get(propName).equals(entry.getValue()) )) {
 normalizedProps.put(propName, props.getProperty(propName));
 }
 }



[phoenix] branch master updated: PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new d17f943  PHOENIX-5124 PropertyPolicyProvider should not evaluate 
default hbase config properties
d17f943 is described below

commit d17f943b882afbc7b0b7ee7e252d324ed4de462c
Author: Thomas D'Silva 
AuthorDate: Mon Feb 4 23:17:37 2019 -0800

PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase 
config properties
---
 .../apache/phoenix/end2end/AppendOnlySchemaIT.java |  2 +-
 .../phoenix/end2end/PropertyPolicyProviderIT.java  | 26 
 .../java/org/apache/phoenix/rpc/UpdateCacheIT.java |  2 +-
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  2 +-
 .../apache/phoenix/jdbc/PhoenixEmbeddedDriver.java |  7 ++
 .../org/apache/phoenix/util/PropertiesUtil.java| 28 +-
 6 files changed, 58 insertions(+), 9 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
index b39c4f0..e1c56ea 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
@@ -70,7 +70,7 @@ public class AppendOnlySchemaIT extends 
ParallelStatsDisabledIT {
 Mockito.spy(driver.getConnectionQueryServices(getUrl(),
 PropertiesUtil.deepCopy(TEST_PROPERTIES)));
 Properties props = new Properties();
-props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
+props.putAll(PhoenixEmbeddedDriver.DEFAULT_PROPS.asMap());
 
 try (Connection conn1 = connectionQueryServices.connect(getUrl(), 
props);
 Connection conn2 = sameClient ? conn1 : 
connectionQueryServices.connect(getUrl(), props)) {
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
new file mode 100644
index 000..48508a9
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
@@ -0,0 +1,26 @@
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.phoenix.mapreduce.util.ConnectionUtil;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Properties;
+
+public class PropertyPolicyProviderIT  extends ParallelStatsDisabledIT {
+
+@Test
+public void testUsingDefaultHBaseConfigs() throws SQLException {
+Configuration config = HBaseConfiguration.create();
+config.set(HConstants.ZOOKEEPER_QUORUM, getUrl());
+Properties properties=new Properties();
+properties.put("allowedProperty","value");
+try(
+Connection conn = ConnectionUtil.getInputConnection(config, 
properties)
+){}
+}
+
+}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
index 2959b99..a1bdad7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
@@ -192,7 +192,7 @@ public class UpdateCacheIT extends ParallelStatsDisabledIT {
// use a spyed ConnectionQueryServices so we can verify calls 
to getTable
ConnectionQueryServices connectionQueryServices = 
Mockito.spy(driver.getConnectionQueryServices(getUrl(), 
PropertiesUtil.deepCopy(TEST_PROPERTIES)));
Properties props = new Properties();
-   props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
+   props.putAll(PhoenixEmbeddedDriver.DEFAULT_PROPS.asMap());
Connection conn = connectionQueryServices.connect(getUrl(), 
props);
try {
conn.setAutoCommit(false);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 596e27c..d74 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -245,7 +245,7 @@ public class PhoenixConnection implements Connection, 
MetaDataMutated, SQLClosea
 
 // Filter user provided properties based on property policy, if
 // provided.
-PropertyPolicyProvider.getPropertyPolicy().evaluate(info);
+PropertyPolicyProvider.getPropertyPolicy().evaluate(PropertiesUtil.removeStandardHBasePhoenixConfig(info));
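
The net effect of the PhoenixConnection change above is that the property policy now runs against the filtered property set only. A hedged Java sketch of that connect-time flow follows; PropertyPolicy is a stand-in interface here, and filterStandardConfig stands in for PropertiesUtil.removeStandardHBasePhoenixConfig, so neither is necessarily the exact Phoenix SPI:

    import java.util.Properties;

    // Stand-in for the policy SPI: expected to throw an unchecked exception
    // when a client property is not allowed.
    interface PropertyPolicy {
        void evaluate(Properties props);
    }

    class ConnectFlowSketch {

        static Properties filterStandardConfig(Properties info) {
            // See the PropertiesUtil hunks above for the real filtering rule.
            return info; // placeholder only
        }

        static void evaluateUserProperties(Properties info, PropertyPolicy policy) {
            // Previously the policy saw every entry in 'info', including values
            // merely copied from hbase-site.xml and the Phoenix defaults, so a
            // strict policy could reject a connection over properties the user
            // never set. After this change only user-provided entries remain.
            policy.evaluate(filterStandardConfig(info));
        }
    }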

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new d3301e4  PHOENIX-5124 PropertyPolicyProvider should not evaluate 
default hbase config properties
d3301e4 is described below

commit d3301e45435e479cf8dfeda6f4bcf2873b3106c5
Author: Thomas D'Silva 
AuthorDate: Mon Feb 4 23:17:37 2019 -0800

PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase 
config properties
---
 .../apache/phoenix/end2end/AppendOnlySchemaIT.java |  2 +-
 .../phoenix/end2end/PropertyPolicyProviderIT.java  | 26 
 .../java/org/apache/phoenix/rpc/UpdateCacheIT.java |  2 +-
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  2 +-
 .../apache/phoenix/jdbc/PhoenixEmbeddedDriver.java |  7 ++
 .../org/apache/phoenix/util/PropertiesUtil.java| 28 +-
 6 files changed, 58 insertions(+), 9 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
index b39c4f0..e1c56ea 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
@@ -70,7 +70,7 @@ public class AppendOnlySchemaIT extends 
ParallelStatsDisabledIT {
 Mockito.spy(driver.getConnectionQueryServices(getUrl(),
 PropertiesUtil.deepCopy(TEST_PROPERTIES)));
 Properties props = new Properties();
-props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
+props.putAll(PhoenixEmbeddedDriver.DEFAULT_PROPS.asMap());
 
 try (Connection conn1 = connectionQueryServices.connect(getUrl(), 
props);
 Connection conn2 = sameClient ? conn1 : 
connectionQueryServices.connect(getUrl(), props)) {
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
new file mode 100644
index 000..48508a9
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
@@ -0,0 +1,26 @@
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.phoenix.mapreduce.util.ConnectionUtil;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Properties;
+
+public class PropertyPolicyProviderIT  extends ParallelStatsDisabledIT {
+
+@Test
+public void testUsingDefaultHBaseConfigs() throws SQLException {
+Configuration config = HBaseConfiguration.create();
+config.set(HConstants.ZOOKEEPER_QUORUM, getUrl());
+Properties properties=new Properties();
+properties.put("allowedProperty","value");
+try(
+Connection conn = ConnectionUtil.getInputConnection(config, 
properties)
+){}
+}
+
+}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
index 2959b99..a1bdad7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
@@ -192,7 +192,7 @@ public class UpdateCacheIT extends ParallelStatsDisabledIT {
// use a spyed ConnectionQueryServices so we can verify calls 
to getTable
ConnectionQueryServices connectionQueryServices = 
Mockito.spy(driver.getConnectionQueryServices(getUrl(), 
PropertiesUtil.deepCopy(TEST_PROPERTIES)));
Properties props = new Properties();
-   props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
+   props.putAll(PhoenixEmbeddedDriver.DEFAULT_PROPS.asMap());
Connection conn = connectionQueryServices.connect(getUrl(), 
props);
try {
conn.setAutoCommit(false);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 596e27c..d74 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -245,7 +245,7 @@ public class PhoenixConnection implements Connection, 
MetaDataMutated, SQLClosea
 
 // Filter user provided properties based on property policy, if
 // provided.
-PropertyPolicyProvider.getPropertyPolicy().evaluate(info);
+PropertyPolicyProvider.getPropertyPolicy().evaluate(PropertiesUtil.removeStandardHBasePhoenixConfig(info));

[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new 9910b60  PHOENIX-5124 PropertyPolicyProvider should not evaluate 
default hbase config properties
9910b60 is described below

commit 9910b604362517537d9dc2b5fc4a91f3beecda1d
Author: Thomas D'Silva 
AuthorDate: Mon Feb 4 23:17:37 2019 -0800

PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase 
config properties
---
 .../apache/phoenix/end2end/AppendOnlySchemaIT.java |  2 +-
 .../phoenix/end2end/PropertyPolicyProviderIT.java  | 26 
 .../java/org/apache/phoenix/rpc/UpdateCacheIT.java |  2 +-
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  2 +-
 .../apache/phoenix/jdbc/PhoenixEmbeddedDriver.java |  7 ++
 .../org/apache/phoenix/util/PropertiesUtil.java| 28 +-
 6 files changed, 58 insertions(+), 9 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
index b39c4f0..e1c56ea 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
@@ -70,7 +70,7 @@ public class AppendOnlySchemaIT extends 
ParallelStatsDisabledIT {
 Mockito.spy(driver.getConnectionQueryServices(getUrl(),
 PropertiesUtil.deepCopy(TEST_PROPERTIES)));
 Properties props = new Properties();
-props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
+props.putAll(PhoenixEmbeddedDriver.DEFAULT_PROPS.asMap());
 
 try (Connection conn1 = connectionQueryServices.connect(getUrl(), 
props);
 Connection conn2 = sameClient ? conn1 : 
connectionQueryServices.connect(getUrl(), props)) {
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
new file mode 100644
index 000..48508a9
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
@@ -0,0 +1,26 @@
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.phoenix.mapreduce.util.ConnectionUtil;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Properties;
+
+public class PropertyPolicyProviderIT  extends ParallelStatsDisabledIT {
+
+@Test
+public void testUsingDefaultHBaseConfigs() throws SQLException {
+Configuration config = HBaseConfiguration.create();
+config.set(HConstants.ZOOKEEPER_QUORUM, getUrl());
+Properties properties=new Properties();
+properties.put("allowedProperty","value");
+try(
+Connection conn = ConnectionUtil.getInputConnection(config, 
properties)
+){}
+}
+
+}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
index 2959b99..a1bdad7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
@@ -192,7 +192,7 @@ public class UpdateCacheIT extends ParallelStatsDisabledIT {
// use a spyed ConnectionQueryServices so we can verify calls 
to getTable
ConnectionQueryServices connectionQueryServices = 
Mockito.spy(driver.getConnectionQueryServices(getUrl(), 
PropertiesUtil.deepCopy(TEST_PROPERTIES)));
Properties props = new Properties();
-   props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
+   props.putAll(PhoenixEmbeddedDriver.DEFAULT_PROPS.asMap());
Connection conn = connectionQueryServices.connect(getUrl(), 
props);
try {
conn.setAutoCommit(false);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 596e27c..d74 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -245,7 +245,7 @@ public class PhoenixConnection implements Connection, 
MetaDataMutated, SQLClosea
 
 // Filter user provided properties based on property policy, if
 // provided.
-PropertyPolicyProvider.getPropertyPolicy().evaluate(info);
+PropertyPolicyProvider.getPropertyPolicy().evaluate(PropertiesUtil.removeStandardHBasePhoenixConfig(info));

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties

2019-02-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new c0824af  PHOENIX-5124 PropertyPolicyProvider should not evaluate 
default hbase config properties
c0824af is described below

commit c0824affd9a85eaa59dc03955af8ac0c59b883ce
Author: Thomas D'Silva 
AuthorDate: Mon Feb 4 23:17:37 2019 -0800

PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase 
config properties
---
 .../apache/phoenix/end2end/AppendOnlySchemaIT.java |  2 +-
 .../phoenix/end2end/PropertyPolicyProviderIT.java  | 26 
 .../java/org/apache/phoenix/rpc/UpdateCacheIT.java |  2 +-
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  2 +-
 .../apache/phoenix/jdbc/PhoenixEmbeddedDriver.java |  7 ++
 .../org/apache/phoenix/util/PropertiesUtil.java| 28 +-
 6 files changed, 58 insertions(+), 9 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
index b39c4f0..e1c56ea 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
@@ -70,7 +70,7 @@ public class AppendOnlySchemaIT extends 
ParallelStatsDisabledIT {
 Mockito.spy(driver.getConnectionQueryServices(getUrl(),
 PropertiesUtil.deepCopy(TEST_PROPERTIES)));
 Properties props = new Properties();
-props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
+props.putAll(PhoenixEmbeddedDriver.DEFAULT_PROPS.asMap());
 
 try (Connection conn1 = connectionQueryServices.connect(getUrl(), 
props);
 Connection conn2 = sameClient ? conn1 : 
connectionQueryServices.connect(getUrl(), props)) {
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
new file mode 100644
index 000..48508a9
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PropertyPolicyProviderIT.java
@@ -0,0 +1,26 @@
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.phoenix.mapreduce.util.ConnectionUtil;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Properties;
+
+public class PropertyPolicyProviderIT  extends ParallelStatsDisabledIT {
+
+@Test
+public void testUsingDefaultHBaseConfigs() throws SQLException {
+Configuration config = HBaseConfiguration.create();
+config.set(HConstants.ZOOKEEPER_QUORUM, getUrl());
+Properties properties=new Properties();
+properties.put("allowedProperty","value");
+try(
+Connection conn = ConnectionUtil.getInputConnection(config, 
properties)
+){}
+}
+
+}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
index 2959b99..a1bdad7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
@@ -192,7 +192,7 @@ public class UpdateCacheIT extends ParallelStatsDisabledIT {
// use a spyed ConnectionQueryServices so we can verify calls 
to getTable
ConnectionQueryServices connectionQueryServices = 
Mockito.spy(driver.getConnectionQueryServices(getUrl(), 
PropertiesUtil.deepCopy(TEST_PROPERTIES)));
Properties props = new Properties();
-   props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
+   props.putAll(PhoenixEmbeddedDriver.DEFAULT_PROPS.asMap());
Connection conn = connectionQueryServices.connect(getUrl(), 
props);
try {
conn.setAutoCommit(false);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 596e27c..d74 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -245,7 +245,7 @@ public class PhoenixConnection implements Connection, 
MetaDataMutated, SQLClosea
 
 // Filter user provided properties based on property policy, if
 // provided.
-PropertyPolicyProvider.getPropertyPolicy().evaluate(info);
+PropertyPolicyProvider.getPropertyPolicy().evaluate(PropertiesUtil.removeStandardHBasePhoenixConfig(info));

[phoenix-connectors] branch master created (now 786e7e4)

2019-01-17 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix-connectors.git.


  at 786e7e4  Add README

This branch includes the following new commits:

 new 786e7e4  Add README

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.




[phoenix-connectors] 01/01: Add README

2019-01-17 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix-connectors.git

commit 786e7e42014d36039938492980534c6f8c435ef2
Author: Thomas D'Silva 
AuthorDate: Thu Jan 17 15:44:02 2019 -0800

Add README
---
 README.md | 22 ++
 1 file changed, 22 insertions(+)

diff --git a/README.md b/README.md
new file mode 100644
index 000..b012021
--- /dev/null
+++ b/README.md
@@ -0,0 +1,22 @@
+
+
+![logo](https://phoenix.apache.org/images/phoenix-logo-small.png)
+
+[Apache Phoenix](http://phoenix.apache.org/) enables OLTP and 
operational analytics in Hadoop for low latency applications. Visit the Apache 
Phoenix website [here](http://phoenix.apache.org/). This repo contains 
connectors for third party libraries to access data stored in Phoenix/HBase. 
+
+Copyright ©2019 [Apache Software Foundation](http://www.apache.org/). All 
Rights Reserved. 



[phoenix] branch 4.14-HBase-1.3 updated: modify index state based on client version to support old clients

2019-01-17 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new ff1876a  modify index state based on client version to support old 
clients
ff1876a is described below

commit ff1876a05462f1c8326315a183200b7a688292c9
Author: Kiran Kumar Maturi 
AuthorDate: Wed Jan 16 16:04:31 2019 +0530

modify index state based on client version to support old clients
---
 .../index/InvalidIndexStateClientSideIT.java   | 143 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  36 --
 2 files changed, 168 insertions(+), 11 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
new file mode 100644
index 000..aee9d5b
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetTableRequest;
+import 
org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.hbase.index.util.VersionUtil;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.Closeables;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
+
+import com.sun.org.apache.commons.logging.Log;
+import com.sun.org.apache.commons.logging.LogFactory;
+
+public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
+private static final Log LOG = 
LogFactory.getLog(InvalidIndexStateClientSideIT.class);
+
+@Test
+public void testCachedConnections() throws Throwable {
+final String schemaName = generateUniqueName();
+final String tableName = generateUniqueName();
+final String fullTableName = SchemaUtil.getTableName(schemaName, 
tableName);
+final String indexName = generateUniqueName();
+final String fullIndexName = SchemaUtil.getTableName(schemaName, 
indexName);
+final Connection conn = DriverManager.getConnection(getUrl());
+
+// create table and indices
+String createTableSql =
+"CREATE TABLE " + fullTableName
++ "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, 
v2 INTEGER, v3 INTEGER)";
+conn.createStatement().execute(createTableSql);
+conn.createStatement()
+.execute("CREATE INDEX " + indexName + " ON " + fullTableName 
+ "(v1)");
+conn.commit();
+PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);
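
A hedged sketch of the compatibility rule this commit introduces on the server: encode (major, minor, patch) into a single int, as VersionUtil-style encoders do, and report an index state the connecting client can parse. The 4.14.0 threshold, the bit layout, and the PENDING_DISABLE-to-DISABLE mapping follow the commit's intent and are assumptions here, not the exact committed logic:

    class IndexStateCompatSketch {

        // Assumed packing; Phoenix's VersionUtil uses a comparable scheme.
        static int encodeVersion(int major, int minor, int patch) {
            return (major << 16) | (minor << 8) | patch;
        }

        static String stateForClient(String serverState, int clientVersion) {
            int minVersionKnowingPendingDisable = encodeVersion(4, 14, 0); // assumption
            if ("PENDING_DISABLE".equals(serverState)
                    && clientVersion < minVersionKnowingPendingDisable) {
                // Older clients do not know PENDING_DISABLE; report DISABLE so
                // they keep functioning against a newer server.
                return "DISABLE";
            }
            return serverState;
        }
    }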

[phoenix] branch 4.14-HBase-1.4 updated: modify index state based on client version to support old clients

2019-01-17 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new 4c107e0  modify index state based on client version to support old 
clients
4c107e0 is described below

commit 4c107e0ece8e873f058107bd7a5dcd79ffa5d8fd
Author: Kiran Kumar Maturi 
AuthorDate: Wed Jan 16 16:04:31 2019 +0530

modify index state based on client version to support old clients
---
 .../index/InvalidIndexStateClientSideIT.java   | 143 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  36 --
 2 files changed, 168 insertions(+), 11 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
new file mode 100644
index 000..aee9d5b
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetTableRequest;
+import 
org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.hbase.index.util.VersionUtil;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.Closeables;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
+
+import com.sun.org.apache.commons.logging.Log;
+import com.sun.org.apache.commons.logging.LogFactory;
+
+public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
+private static final Log LOG = 
LogFactory.getLog(InvalidIndexStateClientSideIT.class);
+
+@Test
+public void testCachedConnections() throws Throwable {
+final String schemaName = generateUniqueName();
+final String tableName = generateUniqueName();
+final String fullTableName = SchemaUtil.getTableName(schemaName, 
tableName);
+final String indexName = generateUniqueName();
+final String fullIndexName = SchemaUtil.getTableName(schemaName, 
indexName);
+final Connection conn = DriverManager.getConnection(getUrl());
+
+// create table and indices
+String createTableSql =
+"CREATE TABLE " + fullTableName
++ "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, 
v2 INTEGER, v3 INTEGER)";
+conn.createStatement().execute(createTableSql);
+conn.createStatement()
+.execute("CREATE INDEX " + indexName + " ON " + fullTableName 
+ "(v1)");
+conn.commit();
+PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);

[phoenix] branch 4.14-HBase-1.2 updated: modify index state based on client version to support old clients

2019-01-17 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.2 by this push:
 new b6222cb  modify index state based on client version to support old 
clients
b6222cb is described below

commit b6222cbd8850e737c11a685688df4d03a6f573bd
Author: Kiran Kumar Maturi 
AuthorDate: Wed Jan 16 16:04:31 2019 +0530

modify index state based on client version to support old clients
---
 .../index/InvalidIndexStateClientSideIT.java   | 143 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  36 --
 2 files changed, 168 insertions(+), 11 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
new file mode 100644
index 000..aee9d5b
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetTableRequest;
+import 
org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.hbase.index.util.VersionUtil;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.Closeables;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
+
+import com.sun.org.apache.commons.logging.Log;
+import com.sun.org.apache.commons.logging.LogFactory;
+
+public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
+private static final Log LOG = 
LogFactory.getLog(InvalidIndexStateClientSideIT.class);
+
+@Test
+public void testCachedConnections() throws Throwable {
+final String schemaName = generateUniqueName();
+final String tableName = generateUniqueName();
+final String fullTableName = SchemaUtil.getTableName(schemaName, 
tableName);
+final String indexName = generateUniqueName();
+final String fullIndexName = SchemaUtil.getTableName(schemaName, 
indexName);
+final Connection conn = DriverManager.getConnection(getUrl());
+
+// create table and indices
+String createTableSql =
+"CREATE TABLE " + fullTableName
++ "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, 
v2 INTEGER, v3 INTEGER)";
+conn.createStatement().execute(createTableSql);
+conn.createStatement()
+.execute("CREATE INDEX " + indexName + " ON " + fullTableName 
+ "(v1)");
+conn.commit();
+PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);

[phoenix] branch master updated: PHOENIX-3623 Integrate Omid with Phoenix (addendum)

2019-01-17 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new c8686ce  PHOENIX-3623 Integrate Omid with Phoenix (addendum)
c8686ce is described below

commit c8686ce73093b879bbda5f08f3e7933a9708e110
Author: Yonatan Gottesman 
AuthorDate: Sun Jan 13 13:52:18 2019 +0200

PHOENIX-3623 Integrate Omid with Phoenix (addendum)
---
 phoenix-core/pom.xml   | 11 +++
 .../hbase/coprocessor/BaseRegionObserver.java  | 22 --
 .../coprocessor/OmidTransactionalProcessor.java| 11 ++-
 .../phoenix/transaction/OmidTransactionTable.java  |  4 +++-
 4 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 8056ff2..db5b909 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -217,6 +217,17 @@
       </exclusion>
     </exclusions>
   </dependency>
+  <dependency>
+    <groupId>org.apache.omid</groupId>
+    <artifactId>omid-hbase-shims-hbase2.x</artifactId>
+    <version>${omid.version}</version>
+    <exclusions>
+      <exclusion>
+        <groupId>org.testng</groupId>
+        <artifactId>testng</artifactId>
+      </exclusion>
+    </exclusions>
+  </dependency>
   <dependency>
     <groupId>org.apache.omid</groupId>
     <artifactId>omid-tso-server-hbase2.x</artifactId>
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
deleted file mode 100644
index fa206bb..000
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.coprocessor;
-
-public class BaseRegionObserver implements RegionObserver{
-
-}
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/OmidTransactionalProcessor.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/OmidTransactionalProcessor.java
index b84b5ae..afcfea8 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/OmidTransactionalProcessor.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/OmidTransactionalProcessor.java
@@ -17,11 +17,20 @@
  */
 package org.apache.phoenix.coprocessor;
 
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.omid.transaction.OmidSnapshotFilter;
 import org.apache.phoenix.transaction.OmidTransactionProvider;
 
+import java.util.Optional;
 
-public class OmidTransactionalProcessor extends DelegateRegionObserver {
+
+public class OmidTransactionalProcessor extends DelegateRegionObserver 
implements RegionCoprocessor {
+
+@Override
+public Optional<RegionObserver> getRegionObserver() {
+return Optional.of(this);
+}
 
 public OmidTransactionalProcessor() {
 // Hack for testing - retrieves the commit table client from the 
singleton OmidTransactionProvider
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index d27348d..5749d83 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -176,7 +176,9 @@ public class OmidTransactionTable implements Table {
 public void batch(List<? extends Row> actions, Object[] results)
 throws IOException, InterruptedException {
 tTable.batch(tx, actions, addShadowCells);
-Arrays.fill(results, EMPTY_RESULT_EXISTS_TRUE);
+if (results != null) {
+Arrays.fill(results, EMPTY_RESULT_EXISTS_TRUE);
+}
 }
 
 @Override
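
The pattern in this hunk is the standard HBase 2.x replacement for extending the removed BaseRegionObserver: a coprocessor implements RegionCoprocessor and hands its observer back through an Optional. A minimal standalone Java sketch (class name illustrative; hook methods elided, since RegionObserver methods are defaults in HBase 2.x):

    import java.util.Optional;

    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;

    // The class is both the coprocessor entry point and its own observer.
    public class MyObserverCoprocessor implements RegionCoprocessor, RegionObserver {
        @Override
        public Optional<RegionObserver> getRegionObserver() {
            return Optional.of(this);
        }
        // Override only the RegionObserver hooks you need (preGetOp, prePut, ...).
    }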



[phoenix] 02/02: PHOENIX-5102 Filtering on DATE types throws an exception using the spark connector

2019-01-16 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 8273cc11d5465efa60dc4774adf5339655cec1eb
Author: Thomas D'Silva 
AuthorDate: Tue Jan 15 16:56:15 2019 -0800

PHOENIX-5102 Filtering on DATE types throws an exception using the spark 
connector
---
 .../src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala   | 6 +-
 .../org/apache/phoenix/spark/FilterExpressionCompiler.scala  | 9 +
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala 
b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index d6d0f92..b40b638 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -624,7 +624,7 @@ class PhoenixSparkIT extends AbstractPhoenixSparkIT {
 varByteArray shouldEqual dataSet(0).get(3)
   }
 
-  test("Can load Phoenix DATE columns through DataFrame API") {
+  test("Can load and filter Phoenix DATE columns through DataFrame API") {
 val df = spark.sqlContext.read
   .format("phoenix")
   .options(Map("table" -> "DATE_TEST", PhoenixDataSource.ZOOKEEPER_URL -> 
quorumAddress))
@@ -638,6 +638,10 @@ class PhoenixSparkIT extends AbstractPhoenixSparkIT {
 // Note that Spark also applies the timezone offset to the returned date 
epoch. Rather than perform timezone
 // gymnastics, just make sure we're within 24H of the epoch generated just 
now
 assert(Math.abs(epoch - dt) < 86400000)
+
+df.createOrReplaceTempView("DATE_TEST")
+val df2 = spark.sql("SELECT * FROM DATE_TEST WHERE COL1 > 
TO_DATE('1990-01-01 00:00:01', '-MM-dd HH:mm:ss')")
+assert(df2.count() == 1L)
   }
 
   test("Filter operation doesn't work for column names containing a white 
space (PHOENIX-2547)") {
diff --git 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
index 74ff67e..1d6973c 100644
--- 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
+++ 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.spark
 
+import java.sql.Date
 import java.sql.Timestamp
 import java.text.Format
 
@@ -26,6 +27,7 @@ import org.apache.spark.sql.sources._
 
 class FilterExpressionCompiler() {
 
+  val dateformatter:Format = 
DateUtil.getDateFormatter(DateUtil.DEFAULT_DATE_FORMAT, 
DateUtil.DEFAULT_TIME_ZONE_ID)
   val timeformatter:Format = 
DateUtil.getTimestampFormatter(DateUtil.DEFAULT_TIME_FORMAT, 
DateUtil.DEFAULT_TIME_ZONE_ID)
 
   /**
@@ -102,6 +104,8 @@ class FilterExpressionCompiler() {
 
 case timestampValue: Timestamp => getTimestampString(timestampValue)
 
+case dateValue: Date => getDateString(dateValue)
+
 // Borrowed from 'elasticsearch-hadoop', support these internal UTF types 
across Spark versions
 // Spark 1.4
 case utf if (isClass(utf, "org.apache.spark.sql.types.UTF8String")) => 
s"'${escapeStringConstant(utf.toString)}'"
@@ -117,6 +121,11 @@ class FilterExpressionCompiler() {
   DateUtil.DEFAULT_TIME_FORMAT, DateUtil.DEFAULT_TIME_ZONE_ID)
   }
 
+  private def getDateString(dateValue: Date): String = {
+"TO_DATE('%s', '%s', '%s')".format(dateformatter.format(dateValue),
+  DateUtil.DEFAULT_DATE_FORMAT, DateUtil.DEFAULT_TIME_ZONE_ID)
+  }
+
   // Helper function to escape column key to work with SQL queries
   private def escapeKey(key: String): String = 
SchemaUtil.getEscapedFullColumnName(key)
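
For context, here is a hedged Java usage sketch of what this fix enables: filtering a Phoenix-backed DataFrame on a DATE column now compiles to a Phoenix TO_DATE(...) expression instead of throwing. The "zkUrl" option key is assumed to match PhoenixDataSource.ZOOKEEPER_URL used in the Scala test above, and the quorum address and table contents are illustrative:

    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.SparkSession;

    import static org.apache.spark.sql.functions.col;

    public class DateFilterSketch {
        public static void main(String[] args) {
            SparkSession spark = SparkSession.builder().appName("date-filter-sketch").getOrCreate();

            Dataset<Row> df = spark.read()
                    .format("phoenix")
                    .option("table", "DATE_TEST")
                    .option("zkUrl", "localhost:2181") // illustrative quorum address
                    .load();

            // The pushed-down predicate is rendered roughly as
            //   COL1 > TO_DATE('1990-01-01 ...', <default format>, <default time zone>)
            long matches = df.filter(col("COL1").gt(java.sql.Date.valueOf("1990-01-01"))).count();
            System.out.println(matches);
        }
    }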
 



[phoenix] 01/02: PHOENIX-5100 Add test to verify IndexTool can build global view indexes

2019-01-16 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 29e76fd53cf0aa57b1656cd717020e7a8566873f
Author: Thomas D'Silva 
AuthorDate: Mon Jan 14 14:14:31 2019 -0800

PHOENIX-5100 Add test to verify IndexTool can build global view indexes
---
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 102 +
 1 file changed, 62 insertions(+), 40 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
index 11cfc5c9..5fd023d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
@@ -39,9 +39,11 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.end2end.IndexToolIT;
 import org.apache.phoenix.end2end.SplitSystemCatalogIT;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.mapreduce.index.IndexTool;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PNameFactory;
@@ -218,50 +220,70 @@ public class ViewIndexIT extends SplitSystemCatalogIT {
 
 
 @Test
-public void testCreatingIndexOnGlobalView() throws Exception {
+public void testMultiTenantViewGlobalIndex() throws Exception {
 String baseTable =  SchemaUtil.getTableName(SCHEMA1, 
generateUniqueName());
-String globalView = SchemaUtil.getTableName(SCHEMA2, 
generateUniqueName());
+String globalViewName = generateUniqueName();
+String fullGlobalViewName = SchemaUtil.getTableName(SCHEMA2, 
globalViewName);
 String globalViewIdx =  generateUniqueName();
+String tenantView =  generateUniqueName();
 String fullIndexName = SchemaUtil.getTableName(SCHEMA2, globalViewIdx);
 try (Connection conn = DriverManager.getConnection(getUrl())) {
-conn.createStatement().execute("CREATE IMMUTABLE TABLE " + 
baseTable + " (TENANT_ID CHAR(15) NOT NULL, PK2 DATE NOT NULL, PK3 INTEGER NOT 
NULL, KV1 VARCHAR, KV2 VARCHAR, KV3 CHAR(15) CONSTRAINT PK PRIMARY 
KEY(TENANT_ID, PK2 ROW_TIMESTAMP, PK3)) MULTI_TENANT=true");
-conn.createStatement().execute("CREATE VIEW " + globalView + " AS 
SELECT * FROM " + baseTable);
-conn.createStatement().execute("CREATE INDEX " + globalViewIdx + " 
ON " + globalView + " (PK3 DESC, KV3) INCLUDE (KV1)");
-PreparedStatement stmt = conn.prepareStatement("UPSERT INTO  " + 
globalView + " (TENANT_ID, PK2, PK3, KV1, KV3) VALUES (?, ?, ?, ?, ?)");
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 1);
-stmt.setString(4, "KV1");
-stmt.setString(5, "KV3");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 2);
-stmt.setString(4, "KV4");
-stmt.setString(5, "KV5");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 3);
-stmt.setString(4, "KV6");
-stmt.setString(5, "KV7");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 4);
-stmt.setString(4, "KV8");
-stmt.setString(5, "KV9");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 5);
-stmt.setString(4, "KV10");
-stmt.setString(5, "KV11");
-stmt.executeUpdate();
-conn.commit();
-
+conn.createStatement().execute("CREATE TABLE " + baseTable + " 
(TENANT_ID CHAR(15) NOT NULL, PK2 DATE NOT NULL, PK3 INTEGER NOT NULL, KV1 
VARCHAR, KV2 VARCHAR, KV3 CHAR(15) CONSTRAINT PK PRIMARY KEY(TENANT_ID, PK2, 
PK3)) MULTI_TENANT=true");
+conn.createStatement().execute("CREATE VIEW " + fullGlobalViewName 
+ " AS SELECT * FROM " + baseTable);
+conn.createStatement().execute("CREATE INDEX " + globalViewIdx + " 
ON " + fullGlobalViewName + " (PK3 DESC, KV3) INCLUDE (KV1) ASYNC");
+
+  
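
The reworked test now creates the global view index with ASYNC and populates it through the IndexTool MapReduce job. Below is a hedged Java sketch of driving that job programmatically; the schema, table, index, and output-path values are illustrative, and the flag names are the commonly documented IndexTool options rather than a guaranteed interface:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.util.ToolRunner;
    import org.apache.phoenix.mapreduce.index.IndexTool;

    public class BuildViewIndexSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Run after CREATE INDEX ... ON <global view> ... ASYNC so the
            // index is built by MapReduce rather than synchronously.
            int rc = ToolRunner.run(conf, new IndexTool(), new String[] {
                    "--schema", "S1",
                    "--data-table", "GLOBAL_VIEW",
                    "--index-table", "GLOBAL_VIEW_IDX",
                    "--output-path", "/tmp/GLOBAL_VIEW_IDX"
            });
            System.exit(rc);
        }
    }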

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5102 Filtering on DATE types throws an exception using the spark connector

2019-01-16 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new d0843be  PHOENIX-5102 Filtering on DATE types throws an exception 
using the spark connector
d0843be is described below

commit d0843beb8ac422aff08710115de41aadd5571348
Author: Thomas D'Silva 
AuthorDate: Tue Jan 15 16:56:15 2019 -0800

PHOENIX-5102 Filtering on DATE types throws an exception using the spark 
connector
---
 .../src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala   | 6 +-
 .../org/apache/phoenix/spark/FilterExpressionCompiler.scala  | 9 +
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala 
b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index d6d0f92..b40b638 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -624,7 +624,7 @@ class PhoenixSparkIT extends AbstractPhoenixSparkIT {
 varByteArray shouldEqual dataSet(0).get(3)
   }
 
-  test("Can load Phoenix DATE columns through DataFrame API") {
+  test("Can load and filter Phoenix DATE columns through DataFrame API") {
 val df = spark.sqlContext.read
   .format("phoenix")
   .options(Map("table" -> "DATE_TEST", PhoenixDataSource.ZOOKEEPER_URL -> 
quorumAddress))
@@ -638,6 +638,10 @@ class PhoenixSparkIT extends AbstractPhoenixSparkIT {
 // Note that Spark also applies the timezone offset to the returned date 
epoch. Rather than perform timezone
 // gymnastics, just make sure we're within 24H of the epoch generated just 
now
 assert(Math.abs(epoch - dt) < 86400000)
+
+df.createOrReplaceTempView("DATE_TEST")
+val df2 = spark.sql("SELECT * FROM DATE_TEST WHERE COL1 > 
TO_DATE('1990-01-01 00:00:01', '-MM-dd HH:mm:ss')")
+assert(df2.count() == 1L)
   }
 
   test("Filter operation doesn't work for column names containing a white 
space (PHOENIX-2547)") {
diff --git 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
index 74ff67e..1d6973c 100644
--- 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
+++ 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.spark
 
+import java.sql.Date
 import java.sql.Timestamp
 import java.text.Format
 
@@ -26,6 +27,7 @@ import org.apache.spark.sql.sources._
 
 class FilterExpressionCompiler() {
 
+  val dateformatter:Format = 
DateUtil.getDateFormatter(DateUtil.DEFAULT_DATE_FORMAT, 
DateUtil.DEFAULT_TIME_ZONE_ID)
   val timeformatter:Format = 
DateUtil.getTimestampFormatter(DateUtil.DEFAULT_TIME_FORMAT, 
DateUtil.DEFAULT_TIME_ZONE_ID)
 
   /**
@@ -102,6 +104,8 @@ class FilterExpressionCompiler() {
 
 case timestampValue: Timestamp => getTimestampString(timestampValue)
 
+case dateValue: Date => getDateString(dateValue)
+
 // Borrowed from 'elasticsearch-hadoop', support these internal UTF types 
across Spark versions
 // Spark 1.4
 case utf if (isClass(utf, "org.apache.spark.sql.types.UTF8String")) => 
s"'${escapeStringConstant(utf.toString)}'"
@@ -117,6 +121,11 @@ class FilterExpressionCompiler() {
   DateUtil.DEFAULT_TIME_FORMAT, DateUtil.DEFAULT_TIME_ZONE_ID)
   }
 
+  private def getDateString(dateValue: Date): String = {
+"TO_DATE('%s', '%s', '%s')".format(dateformatter.format(dateValue),
+  DateUtil.DEFAULT_DATE_FORMAT, DateUtil.DEFAULT_TIME_ZONE_ID)
+  }
+
   // Helper function to escape column key to work with SQL queries
   private def escapeKey(key: String): String = 
SchemaUtil.getEscapedFullColumnName(key)
 



[phoenix] branch master updated: PHOENIX-5102 Filtering on DATE types throws an exception using the spark connector

2019-01-16 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new ebb3429  PHOENIX-5102 Filtering on DATE types throws an exception 
using the spark connector
ebb3429 is described below

commit ebb3429859af333e36afdc8dc172017bd03e7818
Author: Thomas D'Silva 
AuthorDate: Tue Jan 15 16:56:15 2019 -0800

PHOENIX-5102 Filtering on DATE types throws an exception using the spark 
connector
---
 .../src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala   | 6 +-
 .../org/apache/phoenix/spark/FilterExpressionCompiler.scala  | 9 +
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala 
b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index d6d0f92..b40b638 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -624,7 +624,7 @@ class PhoenixSparkIT extends AbstractPhoenixSparkIT {
 varByteArray shouldEqual dataSet(0).get(3)
   }
 
-  test("Can load Phoenix DATE columns through DataFrame API") {
+  test("Can load and filter Phoenix DATE columns through DataFrame API") {
 val df = spark.sqlContext.read
   .format("phoenix")
   .options(Map("table" -> "DATE_TEST", PhoenixDataSource.ZOOKEEPER_URL -> 
quorumAddress))
@@ -638,6 +638,10 @@ class PhoenixSparkIT extends AbstractPhoenixSparkIT {
 // Note that Spark also applies the timezone offset to the returned date 
epoch. Rather than perform timezone
 // gymnastics, just make sure we're within 24H of the epoch generated just 
now
 assert(Math.abs(epoch - dt) < 86400000)
+
+df.createOrReplaceTempView("DATE_TEST")
+val df2 = spark.sql("SELECT * FROM DATE_TEST WHERE COL1 > 
TO_DATE('1990-01-01 00:00:01', '-MM-dd HH:mm:ss')")
+assert(df2.count() == 1L)
   }
 
   test("Filter operation doesn't work for column names containing a white 
space (PHOENIX-2547)") {
diff --git 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
index 74ff67e..1d6973c 100644
--- 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
+++ 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.spark
 
+import java.sql.Date
 import java.sql.Timestamp
 import java.text.Format
 
@@ -26,6 +27,7 @@ import org.apache.spark.sql.sources._
 
 class FilterExpressionCompiler() {
 
+  val dateformatter:Format = 
DateUtil.getDateFormatter(DateUtil.DEFAULT_DATE_FORMAT, 
DateUtil.DEFAULT_TIME_ZONE_ID)
   val timeformatter:Format = 
DateUtil.getTimestampFormatter(DateUtil.DEFAULT_TIME_FORMAT, 
DateUtil.DEFAULT_TIME_ZONE_ID)
 
   /**
@@ -102,6 +104,8 @@ class FilterExpressionCompiler() {
 
 case timestampValue: Timestamp => getTimestampString(timestampValue)
 
+case dateValue: Date => getDateString(dateValue)
+
 // Borrowed from 'elasticsearch-hadoop', support these internal UTF types 
across Spark versions
 // Spark 1.4
 case utf if (isClass(utf, "org.apache.spark.sql.types.UTF8String")) => 
s"'${escapeStringConstant(utf.toString)}'"
@@ -117,6 +121,11 @@ class FilterExpressionCompiler() {
   DateUtil.DEFAULT_TIME_FORMAT, DateUtil.DEFAULT_TIME_ZONE_ID)
   }
 
+  private def getDateString(dateValue: Date): String = {
+"TO_DATE('%s', '%s', '%s')".format(dateformatter.format(dateValue),
+  DateUtil.DEFAULT_DATE_FORMAT, DateUtil.DEFAULT_TIME_ZONE_ID)
+  }
+
   // Helper function to escape column key to work with SQL queries
   private def escapeKey(key: String): String = 
SchemaUtil.getEscapedFullColumnName(key)
 



[phoenix] branch 4.x-HBase-1.4 updated (4a745a7 -> 8273cc1)

2019-01-16 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 4a745a7  PHOENIX-5073 modify index state based on client version to 
support old clients
 new 29e76fd  PHOENIX-5100 Add test to verify IndexTool can build global 
view indexes
 new 8273cc1  PHOENIX-5102 Filtering on DATE types throws an exception 
using the spark connector

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 102 +
 .../org/apache/phoenix/spark/PhoenixSparkIT.scala  |   6 +-
 .../phoenix/spark/FilterExpressionCompiler.scala   |   9 ++
 3 files changed, 76 insertions(+), 41 deletions(-)



[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-5102 Filtering on DATE types throws an exception using the spark connector

2019-01-16 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new 9a1a494  PHOENIX-5102 Filtering on DATE types throws an exception 
using the spark connector
9a1a494 is described below

commit 9a1a494fe012031c00ee3c9fba35fb74e988500f
Author: Thomas D'Silva 
AuthorDate: Tue Jan 15 16:56:15 2019 -0800

PHOENIX-5102 Filtering on DATE types throws an exception using the spark 
connector
---
 .../src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala   | 6 +-
 .../org/apache/phoenix/spark/FilterExpressionCompiler.scala  | 9 +
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala 
b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index d6d0f92..b40b638 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -624,7 +624,7 @@ class PhoenixSparkIT extends AbstractPhoenixSparkIT {
 varByteArray shouldEqual dataSet(0).get(3)
   }
 
-  test("Can load Phoenix DATE columns through DataFrame API") {
+  test("Can load and filter Phoenix DATE columns through DataFrame API") {
 val df = spark.sqlContext.read
   .format("phoenix")
   .options(Map("table" -> "DATE_TEST", PhoenixDataSource.ZOOKEEPER_URL -> 
quorumAddress))
@@ -638,6 +638,10 @@ class PhoenixSparkIT extends AbstractPhoenixSparkIT {
 // Note that Spark also applies the timezone offset to the returned date 
epoch. Rather than perform timezone
 // gymnastics, just make sure we're within 24H of the epoch generated just 
now
 assert(Math.abs(epoch - dt) < 86400000)
+
+df.createOrReplaceTempView("DATE_TEST")
+val df2 = spark.sql("SELECT * FROM DATE_TEST WHERE COL1 > TO_DATE('1990-01-01 00:00:01', 'yyyy-MM-dd HH:mm:ss')")
+assert(df2.count() == 1L)
   }
 
   test("Filter operation doesn't work for column names containing a white 
space (PHOENIX-2547)") {
diff --git 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
index 74ff67e..1d6973c 100644
--- 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
+++ 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/FilterExpressionCompiler.scala
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.spark
 
+import java.sql.Date
 import java.sql.Timestamp
 import java.text.Format
 
@@ -26,6 +27,7 @@ import org.apache.spark.sql.sources._
 
 class FilterExpressionCompiler() {
 
+  val dateformatter:Format = 
DateUtil.getDateFormatter(DateUtil.DEFAULT_DATE_FORMAT, 
DateUtil.DEFAULT_TIME_ZONE_ID)
   val timeformatter:Format = 
DateUtil.getTimestampFormatter(DateUtil.DEFAULT_TIME_FORMAT, 
DateUtil.DEFAULT_TIME_ZONE_ID)
 
   /**
@@ -102,6 +104,8 @@ class FilterExpressionCompiler() {
 
 case timestampValue: Timestamp => getTimestampString(timestampValue)
 
+case dateValue: Date => getDateString(dateValue)
+
 // Borrowed from 'elasticsearch-hadoop', support these internal UTF types 
across Spark versions
 // Spark 1.4
 case utf if (isClass(utf, "org.apache.spark.sql.types.UTF8String")) => 
s"'${escapeStringConstant(utf.toString)}'"
@@ -117,6 +121,11 @@ class FilterExpressionCompiler() {
   DateUtil.DEFAULT_TIME_FORMAT, DateUtil.DEFAULT_TIME_ZONE_ID)
   }
 
+  private def getDateString(dateValue: Date): String = {
+"TO_DATE('%s', '%s', '%s')".format(dateformatter.format(dateValue),
+  DateUtil.DEFAULT_DATE_FORMAT, DateUtil.DEFAULT_TIME_ZONE_ID)
+  }
+
   // Helper function to escape column key to work with SQL queries
   private def escapeKey(key: String): String = 
SchemaUtil.getEscapedFullColumnName(key)
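
With the Date case wired into the compiler, a greater-than predicate on a
DATE column is pushed down to Phoenix instead of throwing. A rough Java
sketch of the DataFrame-API equivalent of the SQL the test runs; the quorum
address is a placeholder, and the "zkUrl" key is assumed to match
PhoenixDataSource.ZOOKEEPER_URL:

    import static org.apache.spark.sql.functions.col;
    import static org.apache.spark.sql.functions.lit;

    import java.sql.Date;

    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.SparkSession;

    public class DateFilterSketch {
        public static void main(String[] args) {
            SparkSession spark = SparkSession.builder()
                    .master("local[*]").appName("date-filter").getOrCreate();

            // "localhost:2181" is illustrative only
            Dataset<Row> df = spark.read().format("phoenix")
                    .option("table", "DATE_TEST")
                    .option("zkUrl", "localhost:2181")
                    .load();

            // The Date literal is compiled to TO_DATE(...) and evaluated server-side
            long matches = df.filter(col("COL1").gt(lit(Date.valueOf("1990-01-01")))).count();
            System.out.println(matches);

            spark.stop();
        }
    }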
 



[phoenix] branch master updated: PHOENIX-5100 Add test to verify IndexTool can build global view indexes

2019-01-15 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 261e010  PHOENIX-5100 Add test to verify IndexTool can build global 
view indexes
261e010 is described below

commit 261e010bc4f699dd2fbf5d44953372291c75e10c
Author: Thomas D'Silva 
AuthorDate: Mon Jan 14 14:14:31 2019 -0800

PHOENIX-5100 Add test to verify IndexTool can build global view indexes
---
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 102 +
 1 file changed, 62 insertions(+), 40 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
index 11cfc5c9..5fd023d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
@@ -39,9 +39,11 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.end2end.IndexToolIT;
 import org.apache.phoenix.end2end.SplitSystemCatalogIT;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.mapreduce.index.IndexTool;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PNameFactory;
@@ -218,50 +220,70 @@ public class ViewIndexIT extends SplitSystemCatalogIT {
 
 
 @Test
-public void testCreatingIndexOnGlobalView() throws Exception {
+public void testMultiTenantViewGlobalIndex() throws Exception {
 String baseTable =  SchemaUtil.getTableName(SCHEMA1, 
generateUniqueName());
-String globalView = SchemaUtil.getTableName(SCHEMA2, 
generateUniqueName());
+String globalViewName = generateUniqueName();
+String fullGlobalViewName = SchemaUtil.getTableName(SCHEMA2, 
globalViewName);
 String globalViewIdx =  generateUniqueName();
+String tenantView =  generateUniqueName();
 String fullIndexName = SchemaUtil.getTableName(SCHEMA2, globalViewIdx);
 try (Connection conn = DriverManager.getConnection(getUrl())) {
-conn.createStatement().execute("CREATE IMMUTABLE TABLE " + 
baseTable + " (TENANT_ID CHAR(15) NOT NULL, PK2 DATE NOT NULL, PK3 INTEGER NOT 
NULL, KV1 VARCHAR, KV2 VARCHAR, KV3 CHAR(15) CONSTRAINT PK PRIMARY 
KEY(TENANT_ID, PK2 ROW_TIMESTAMP, PK3)) MULTI_TENANT=true");
-conn.createStatement().execute("CREATE VIEW " + globalView + " AS 
SELECT * FROM " + baseTable);
-conn.createStatement().execute("CREATE INDEX " + globalViewIdx + " 
ON " + globalView + " (PK3 DESC, KV3) INCLUDE (KV1)");
-PreparedStatement stmt = conn.prepareStatement("UPSERT INTO  " + 
globalView + " (TENANT_ID, PK2, PK3, KV1, KV3) VALUES (?, ?, ?, ?, ?)");
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 1);
-stmt.setString(4, "KV1");
-stmt.setString(5, "KV3");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 2);
-stmt.setString(4, "KV4");
-stmt.setString(5, "KV5");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 3);
-stmt.setString(4, "KV6");
-stmt.setString(5, "KV7");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 4);
-stmt.setString(4, "KV8");
-stmt.setString(5, "KV9");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 5);
-stmt.setString(4, "KV10");
-stmt.setString(5, "KV11");
-stmt.executeUpdate();
-conn.commit();
-
+conn.createStatement().execute("CREATE TABLE " + baseTable + " 
(TENANT_ID CHAR(15) NOT NULL, PK2 DATE NOT NULL, PK3 INTEGER NOT NULL, KV1 
VARCHAR, KV2 VARCHAR, KV3 CHAR(15) CONSTRAINT PK PRIMARY KEY(TENANT_ID, PK2, 
PK3)) MULTI_TENANT=true");
+conn.createStatement().execute("CREATE VIEW " + fullGlobalViewName 
+ " AS SELECT * FROM " + baseTable);
+ 
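
The commit verifies that IndexTool can build an index defined on a global
view. A rough sketch of driving the tool programmatically, assuming its
usual -s/-dt/-it/-op arguments; the schema, view, index, and output path
below are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.util.ToolRunner;
    import org.apache.phoenix.mapreduce.index.IndexTool;

    public class ViewIndexBuildSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            int status = ToolRunner.run(conf, new IndexTool(), new String[] {
                    "-s", "SCHEMA2",
                    "-dt", "GLOBAL_VIEW",     // the view the index is defined on
                    "-it", "GLOBAL_VIEW_IDX", // the view index to build
                    "-op", "/tmp/view_index_build"
            });
            System.exit(status);
        }
    }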

[phoenix] branch 4.x-HBase-1.2 updated: Add test to verify IndexTool can build global view indexes

2019-01-15 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new acc2a37  Add test to verify IndexTool can build global view indexes
acc2a37 is described below

commit acc2a3747c547298d5ada8c00dfa6fe4daccc264
Author: Thomas D'Silva 
AuthorDate: Mon Jan 14 14:14:31 2019 -0800

Add test to verify IndexTool can build global view indexes
---
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 102 +
 1 file changed, 62 insertions(+), 40 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
index 11cfc5c9..5fd023d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
@@ -39,9 +39,11 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.end2end.IndexToolIT;
 import org.apache.phoenix.end2end.SplitSystemCatalogIT;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.mapreduce.index.IndexTool;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PNameFactory;
@@ -218,50 +220,70 @@ public class ViewIndexIT extends SplitSystemCatalogIT {
 
 
 @Test
-public void testCreatingIndexOnGlobalView() throws Exception {
+public void testMultiTenantViewGlobalIndex() throws Exception {
 String baseTable =  SchemaUtil.getTableName(SCHEMA1, 
generateUniqueName());
-String globalView = SchemaUtil.getTableName(SCHEMA2, 
generateUniqueName());
+String globalViewName = generateUniqueName();
+String fullGlobalViewName = SchemaUtil.getTableName(SCHEMA2, 
globalViewName);
 String globalViewIdx =  generateUniqueName();
+String tenantView =  generateUniqueName();
 String fullIndexName = SchemaUtil.getTableName(SCHEMA2, globalViewIdx);
 try (Connection conn = DriverManager.getConnection(getUrl())) {
-conn.createStatement().execute("CREATE IMMUTABLE TABLE " + 
baseTable + " (TENANT_ID CHAR(15) NOT NULL, PK2 DATE NOT NULL, PK3 INTEGER NOT 
NULL, KV1 VARCHAR, KV2 VARCHAR, KV3 CHAR(15) CONSTRAINT PK PRIMARY 
KEY(TENANT_ID, PK2 ROW_TIMESTAMP, PK3)) MULTI_TENANT=true");
-conn.createStatement().execute("CREATE VIEW " + globalView + " AS 
SELECT * FROM " + baseTable);
-conn.createStatement().execute("CREATE INDEX " + globalViewIdx + " 
ON " + globalView + " (PK3 DESC, KV3) INCLUDE (KV1)");
-PreparedStatement stmt = conn.prepareStatement("UPSERT INTO  " + 
globalView + " (TENANT_ID, PK2, PK3, KV1, KV3) VALUES (?, ?, ?, ?, ?)");
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 1);
-stmt.setString(4, "KV1");
-stmt.setString(5, "KV3");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 2);
-stmt.setString(4, "KV4");
-stmt.setString(5, "KV5");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 3);
-stmt.setString(4, "KV6");
-stmt.setString(5, "KV7");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 4);
-stmt.setString(4, "KV8");
-stmt.setString(5, "KV9");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 5);
-stmt.setString(4, "KV10");
-stmt.setString(5, "KV11");
-stmt.executeUpdate();
-conn.commit();
-
+conn.createStatement().execute("CREATE TABLE " + baseTable + " 
(TENANT_ID CHAR(15) NOT NULL, PK2 DATE NOT NULL, PK3 INTEGER NOT NULL, KV1 
VARCHAR, KV2 VARCHAR, KV3 CHAR(15) CONSTRAINT PK PRIMARY KEY(TENANT_ID, PK2, 
PK3)) MULTI_TENANT=true");
+conn.createStatement().execute("CREATE VIEW " + fullGlobalViewName 
+ " AS SELECT * FROM " + baseTable);
+conn.createStatement().execute(

[phoenix] branch 4.x-HBase-1.3 updated: Add test to verify IndexTool can build global view indexes

2019-01-15 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new e0569b8  Add test to verify IndexTool can build global view indexes
e0569b8 is described below

commit e0569b85a1e98a1bdc243b2ca7380bc9e5789daf
Author: Thomas D'Silva 
AuthorDate: Mon Jan 14 14:14:31 2019 -0800

Add test to verify IndexTool can build global view indexes
---
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 102 +
 1 file changed, 62 insertions(+), 40 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
index 11cfc5c9..5fd023d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
@@ -39,9 +39,11 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.end2end.IndexToolIT;
 import org.apache.phoenix.end2end.SplitSystemCatalogIT;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.mapreduce.index.IndexTool;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PNameFactory;
@@ -218,50 +220,70 @@ public class ViewIndexIT extends SplitSystemCatalogIT {
 
 
 @Test
-public void testCreatingIndexOnGlobalView() throws Exception {
+public void testMultiTenantViewGlobalIndex() throws Exception {
 String baseTable =  SchemaUtil.getTableName(SCHEMA1, 
generateUniqueName());
-String globalView = SchemaUtil.getTableName(SCHEMA2, 
generateUniqueName());
+String globalViewName = generateUniqueName();
+String fullGlobalViewName = SchemaUtil.getTableName(SCHEMA2, 
globalViewName);
 String globalViewIdx =  generateUniqueName();
+String tenantView =  generateUniqueName();
 String fullIndexName = SchemaUtil.getTableName(SCHEMA2, globalViewIdx);
 try (Connection conn = DriverManager.getConnection(getUrl())) {
-conn.createStatement().execute("CREATE IMMUTABLE TABLE " + 
baseTable + " (TENANT_ID CHAR(15) NOT NULL, PK2 DATE NOT NULL, PK3 INTEGER NOT 
NULL, KV1 VARCHAR, KV2 VARCHAR, KV3 CHAR(15) CONSTRAINT PK PRIMARY 
KEY(TENANT_ID, PK2 ROW_TIMESTAMP, PK3)) MULTI_TENANT=true");
-conn.createStatement().execute("CREATE VIEW " + globalView + " AS 
SELECT * FROM " + baseTable);
-conn.createStatement().execute("CREATE INDEX " + globalViewIdx + " 
ON " + globalView + " (PK3 DESC, KV3) INCLUDE (KV1)");
-PreparedStatement stmt = conn.prepareStatement("UPSERT INTO  " + 
globalView + " (TENANT_ID, PK2, PK3, KV1, KV3) VALUES (?, ?, ?, ?, ?)");
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 1);
-stmt.setString(4, "KV1");
-stmt.setString(5, "KV3");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 2);
-stmt.setString(4, "KV4");
-stmt.setString(5, "KV5");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 3);
-stmt.setString(4, "KV6");
-stmt.setString(5, "KV7");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 4);
-stmt.setString(4, "KV8");
-stmt.setString(5, "KV9");
-stmt.executeUpdate();
-stmt.setString(1, "tenantId");
-stmt.setDate(2, new Date(100));
-stmt.setInt(3, 5);
-stmt.setString(4, "KV10");
-stmt.setString(5, "KV11");
-stmt.executeUpdate();
-conn.commit();
-
+conn.createStatement().execute("CREATE TABLE " + baseTable + " 
(TENANT_ID CHAR(15) NOT NULL, PK2 DATE NOT NULL, PK3 INTEGER NOT NULL, KV1 
VARCHAR, KV2 VARCHAR, KV3 CHAR(15) CONSTRAINT PK PRIMARY KEY(TENANT_ID, PK2, 
PK3)) MULTI_TENANT=true");
+conn.createStatement().execute("CREATE VIEW " + fullGlobalViewName 
+ " AS SELECT * FROM " + baseTable);
+conn.createStatement().execute(

[phoenix] branch master updated: modify index state based on client version to support old clients

2019-01-09 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 3bd426f  modify index state based on client version to support old 
clients
3bd426f is described below

commit 3bd426f10337b6e109ab7394bd4a4023039fd0e8
Author: kiran.maturi 
AuthorDate: Wed Jan 9 11:36:26 2019 +0530

modify index state based on client version to support old clients
---
 .../index/InvalidIndexStateClientSideIT.java   | 144 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  41 --
 2 files changed, 174 insertions(+), 11 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
new file mode 100644
index 000..5c1b4b5
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils.BlockingRpcCallback;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetTableRequest;
+import 
org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.hbase.index.util.VersionUtil;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.Closeables;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
+
+public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
+private static final Log LOG = 
LogFactory.getLog(InvalidIndexStateClientSideIT.class);
+
+@Test
+public void testCachedConnections() throws Throwable {
+final String schemaName = generateUniqueName();
+final String tableName = generateUniqueName();
+final String fullTableName = SchemaUtil.getTableName(schemaName, 
tableName);
+final String indexName = generateUniqueName();
+final String fullIndexName = SchemaUtil.getTableName(schemaName, 
indexName);
+final Connection conn = DriverManager.getConnection(getUrl());
+
+// create table and indices
+String createTableSql =
+"CREATE TABLE " + fullTableName
++ "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, 
v2 INTEGER, v3 INTEGER)";
+conn.createStatement().execute(createTableSql);
+conn.createStatement()
+.execute("CREATE INDEX " + indexName + " ON " + fullTableName 
+ "(v1)");
+conn.commit();
+PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);
+ConnectionQueryS
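
The test exercises MetaDataEndpointImpl with a deliberately old client
version so the server can downgrade index states the client does not
understand (e.g. reporting a PENDING_ACTIVE index as ACTIVE). A minimal
sketch of packing such a version, assuming VersionUtil's
(major, minor, patch) overload; the version numbers are illustrative:

    import org.apache.phoenix.hbase.index.util.VersionUtil;

    public class ClientVersionSketch {
        public static void main(String[] args) {
            // Pretend to be a 4.12.0 client; the server compares this encoded
            // int against the version in which a given index state was introduced
            int oldClientVersion = VersionUtil.encodeVersion(4, 12, 0);
            int newClientVersion = VersionUtil.encodeVersion(4, 14, 0);
            System.out.println(oldClientVersion < newClientVersion); // true
        }
    }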

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5073 modify index state based on client version to support old clients

2019-01-09 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 4a745a7  PHOENIX-5073 modify index state based on client version to 
support old clients
4a745a7 is described below

commit 4a745a7bbf200ac4a5051bd592c6f1f759d5a124
Author: kiran.maturi 
AuthorDate: Fri Dec 28 14:46:19 2018 +0530

PHOENIX-5073 modify index state based on client version to support old 
clients
---
 .../index/InvalidIndexStateClientSideIT.java   | 145 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  41 --
 2 files changed, 175 insertions(+), 11 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
new file mode 100644
index 000..7052ade
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetTableRequest;
+import 
org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.hbase.index.util.VersionUtil;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.Closeables;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
+private static final Log LOG = 
LogFactory.getLog(InvalidIndexStateClientSideIT.class);
+
+@Test
+public void testCachedConnections() throws Throwable {
+final String schemaName = generateUniqueName();
+final String tableName = generateUniqueName();
+final String fullTableName = SchemaUtil.getTableName(schemaName, 
tableName);
+final String indexName = generateUniqueName();
+final String fullIndexName = SchemaUtil.getTableName(schemaName, 
indexName);
+final Connection conn = DriverManager.getConnection(getUrl());
+
+// create table and indices
+String createTableSql =
+"CREATE TABLE " + fullTableName
++ "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, 
v2 INTEGER, v3 INTEGER)";
+conn.createStatement().execute(createTableSql);
+conn.createStatement()
+.execute("CREATE INDEX " + indexName + " ON " + fullTableName 
+ "(v1)");
+conn.commit();
+PhoenixConnection phoenixConn = conn.unwrap(Ph

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5073 modify index state based on client version to support old clients

2019-01-09 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 0e01a15  PHOENIX-5073 modify index state based on client version to 
support old clients
0e01a15 is described below

commit 0e01a15acf02a2679a90d7f14870719480aa9adf
Author: kiran.maturi 
AuthorDate: Fri Dec 28 14:46:19 2018 +0530

PHOENIX-5073 modify index state based on client version to support old 
clients
---
 .../index/InvalidIndexStateClientSideIT.java   | 145 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  41 --
 2 files changed, 175 insertions(+), 11 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
new file mode 100644
index 000..7052ade
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetTableRequest;
+import 
org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.hbase.index.util.VersionUtil;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.Closeables;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
+private static final Log LOG = 
LogFactory.getLog(InvalidIndexStateClientSideIT.class);
+
+@Test
+public void testCachedConnections() throws Throwable {
+final String schemaName = generateUniqueName();
+final String tableName = generateUniqueName();
+final String fullTableName = SchemaUtil.getTableName(schemaName, 
tableName);
+final String indexName = generateUniqueName();
+final String fullIndexName = SchemaUtil.getTableName(schemaName, 
indexName);
+final Connection conn = DriverManager.getConnection(getUrl());
+
+// create table and indices
+String createTableSql =
+"CREATE TABLE " + fullTableName
++ "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, 
v2 INTEGER, v3 INTEGER)";
+conn.createStatement().execute(createTableSql);
+conn.createStatement()
+.execute("CREATE INDEX " + indexName + " ON " + fullTableName 
+ "(v1)");
+conn.commit();
+PhoenixConnection phoenixConn = conn.unwrap(Ph

[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-5073 modify index state based on client version to support old clients

2019-01-09 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new defb57a  PHOENIX-5073 modify index state based on client version to 
support old clients
defb57a is described below

commit defb57a09a50c3038421688213a10754fcb2897d
Author: kiran.maturi 
AuthorDate: Fri Dec 28 14:46:19 2018 +0530

PHOENIX-5073 modify index state based on client version to support old 
clients
---
 .../index/InvalidIndexStateClientSideIT.java   | 145 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  41 --
 2 files changed, 175 insertions(+), 11 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
new file mode 100644
index 000..7052ade
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
+import static 
org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetTableRequest;
+import 
org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.hbase.index.util.VersionUtil;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.Closeables;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
+private static final Log LOG = 
LogFactory.getLog(InvalidIndexStateClientSideIT.class);
+
+@Test
+public void testCachedConnections() throws Throwable {
+final String schemaName = generateUniqueName();
+final String tableName = generateUniqueName();
+final String fullTableName = SchemaUtil.getTableName(schemaName, 
tableName);
+final String indexName = generateUniqueName();
+final String fullIndexName = SchemaUtil.getTableName(schemaName, 
indexName);
+final Connection conn = DriverManager.getConnection(getUrl());
+
+// create table and indices
+String createTableSql =
+"CREATE TABLE " + fullTableName
++ "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, 
v2 INTEGER, v3 INTEGER)";
+conn.createStatement().execute(createTableSql);
+conn.createStatement()
+.execute("CREATE INDEX " + indexName + " ON " + fullTableName 
+ "(v1)");
+conn.commit();
+PhoenixConnection phoenixConn = conn.unwrap(Ph

[phoenix] branch master updated: PHOENIX-5059 Use the Datasource v2 api in the spark connector (addendum)

2019-01-09 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 7d6d8e4  PHOENIX-5059 Use the Datasource v2 api in the spark connector 
(addendum)
7d6d8e4 is described below

commit 7d6d8e478b54448b88ede3f80ae245f627cb
Author: Thomas D'Silva 
AuthorDate: Wed Jan 9 17:08:19 2019 -0800

PHOENIX-5059 Use the Datasource v2 api in the spark connector (addendum)
---
 .../phoenix/spark/datasource/v2/reader/PhoenixDataSourceReader.java| 2 +-
 .../spark/datasource/v2/reader/PhoenixInputPartitionReader.java| 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixDataSourceReader.java
 
b/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixDataSourceReader.java
index 446d96f..c76d9c8 100644
--- 
a/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixDataSourceReader.java
+++ 
b/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixDataSourceReader.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.RegionSizeCalculator;
+import org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.iterate.MapReduceParallelScanGrouper;
 import org.apache.phoenix.jdbc.PhoenixConnection;
diff --git 
a/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixInputPartitionReader.java
 
b/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixInputPartitionReader.java
index 30e84db..664a887 100644
--- 
a/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixInputPartitionReader.java
+++ 
b/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixInputPartitionReader.java
@@ -25,6 +25,7 @@ import java.sql.Statement;
 import java.util.List;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.QueryPlan;
@@ -108,7 +109,7 @@ public class PhoenixInputPartitionReader implements 
InputPartitionReader

[phoenix] branch master updated: PHOENIX-5059 Use the Datasource v2 api in the spark connector

2019-01-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 583cdd3  PHOENIX-5059 Use the Datasource v2 api in the spark connector
583cdd3 is described below

commit 583cdd3e27b80f16ac758f2b0d53d8814a19bdc2
Author: Thomas D'Silva 
AuthorDate: Tue Dec 11 14:59:39 2018 -0800

PHOENIX-5059 Use the Datasource v2 api in the spark connector
---
 .../phoenix/end2end/salted/BaseSaltedTableIT.java  |   6 +-
 phoenix-spark/pom.xml  |   8 +
 .../java/org/apache/phoenix/spark/OrderByIT.java   |  92 ++--
 .../java/org/apache/phoenix/spark/SparkUtil.java   |  25 +-
 phoenix-spark/src/it/resources/globalSetup.sql |   6 +-
 .../phoenix/spark/AbstractPhoenixSparkIT.scala |  12 +-
 .../org/apache/phoenix/spark/PhoenixSparkIT.scala  | 541 +++--
 .../spark/PhoenixSparkITTenantSpecific.scala   |  18 +-
 .../spark/datasource/v2/PhoenixDataSource.java |  82 
 .../v2/reader/PhoenixDataSourceReadOptions.java|  51 ++
 .../v2/reader/PhoenixDataSourceReader.java | 201 
 .../v2/reader/PhoenixInputPartition.java   |  44 ++
 .../v2/reader/PhoenixInputPartitionReader.java | 168 +++
 .../v2/writer/PhoenixDataSourceWriteOptions.java   | 109 +
 .../datasource/v2/writer/PhoenixDataWriter.java| 100 
 .../v2/writer/PhoenixDataWriterFactory.java|  19 +
 .../v2/writer/PhoenixDatasourceWriter.java |  34 ++
 ...org.apache.spark.sql.sources.DataSourceRegister |   1 +
 .../apache/phoenix/spark/ConfigurationUtil.scala   |   1 +
 .../apache/phoenix/spark/DataFrameFunctions.scala  |   2 +-
 .../org/apache/phoenix/spark/DefaultSource.scala   |   1 +
 ...lation.scala => FilterExpressionCompiler.scala} | 109 ++---
 .../org/apache/phoenix/spark/PhoenixRDD.scala  |  61 +--
 .../phoenix/spark/PhoenixRecordWritable.scala  |   2 +-
 .../org/apache/phoenix/spark/PhoenixRelation.scala |  70 +--
 .../apache/phoenix/spark/ProductRDDFunctions.scala |   1 +
 .../phoenix/spark/SparkContextFunctions.scala  |   1 +
 .../org/apache/phoenix/spark/SparkSchemaUtil.scala |  84 
 .../phoenix/spark/SparkSqlContextFunctions.scala   |   1 +
 .../datasources/jdbc/PhoenixJdbcDialect.scala  |  21 +
 .../execution/datasources/jdbc/SparkJdbcUtil.scala | 309 
 pom.xml|   2 +-
 32 files changed, 1655 insertions(+), 527 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
index 3051cd6..ef127ac 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
@@ -194,7 +194,7 @@ public abstract class BaseSaltedTableIT extends 
ParallelStatsDisabledIT  {
 .setSelectColumns(
 Lists.newArrayList("A_INTEGER", "A_STRING", "A_ID", 
"B_STRING", "B_INTEGER"))
 .setFullTableName(tableName)
-.setWhereClause("a_integer = 1 AND a_string >= 'ab' AND 
a_string < 'de' AND a_id = '123'");
+.setWhereClause("A_INTEGER = 1 AND A_STRING >= 'ab' AND 
A_STRING < 'de' AND A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 assertTrue(rs.next());
 assertEquals(1, rs.getInt(1));
@@ -205,7 +205,7 @@ public abstract class BaseSaltedTableIT extends 
ParallelStatsDisabledIT  {
 assertFalse(rs.next());
 
 // all single slots with one value.
-queryBuilder.setWhereClause("a_integer = 1 AND a_string = 'ab' AND 
a_id = '123'");
+queryBuilder.setWhereClause("A_INTEGER = 1 AND A_STRING = 'ab' AND 
A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 assertTrue(rs.next());
 assertEquals(1, rs.getInt(1));
@@ -216,7 +216,7 @@ public abstract class BaseSaltedTableIT extends 
ParallelStatsDisabledIT  {
 assertFalse(rs.next());
 
 // all single slots with multiple values.
-queryBuilder.setWhereClause("a_integer in (2, 4) AND a_string = 
'abc' AND a_id = '123'");
+queryBuilder.setWhereClause("A_INTEGER in (2, 4) AND A_STRING = 
'abc' AND A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 
 assertTrue(rs.next());
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index db511b5..c71c92a
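
PHOENIX-5059 registers the connector under the short name "phoenix" via
DataSourceRegister. A rough sketch of round-tripping data through the v2
reader and writer, assuming SaveMode.Overwrite maps to Phoenix UPSERTs and
that "zkUrl" matches PhoenixDataSource.ZOOKEEPER_URL; the table names and
quorum address are placeholders:

    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.SaveMode;
    import org.apache.spark.sql.SparkSession;

    public class PhoenixV2RoundTripSketch {
        public static void main(String[] args) {
            SparkSession spark = SparkSession.builder()
                    .master("local[*]").appName("phoenix-v2").getOrCreate();

            // Read through the v2 reader (one input partition per region scan,
            // with filters compiled and pushed down)
            Dataset<Row> df = spark.read().format("phoenix")
                    .option("table", "SOURCE_TABLE")
                    .option("zkUrl", "localhost:2181")
                    .load();

            // Write through the v2 writer; rows are upserted into the target
            df.write().format("phoenix")
                    .mode(SaveMode.Overwrite)
                    .option("table", "TARGET_TABLE")
                    .option("zkUrl", "localhost:2181")
                    .save();

            spark.stop();
        }
    }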

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5059 Use the Datasource v2 api in the spark connector

2019-01-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new f86b97b  PHOENIX-5059 Use the Datasource v2 api in the spark connector
f86b97b is described below

commit f86b97b00d3935a699eaa8fd122463e468b42cd4
Author: Thomas D'Silva 
AuthorDate: Tue Dec 11 14:59:39 2018 -0800

PHOENIX-5059 Use the Datasource v2 api in the spark connector
---
 .../phoenix/end2end/salted/BaseSaltedTableIT.java  |   6 +-
 phoenix-spark/pom.xml  |   8 +
 .../java/org/apache/phoenix/spark/OrderByIT.java   |  92 ++--
 .../java/org/apache/phoenix/spark/SparkUtil.java   |  25 +-
 phoenix-spark/src/it/resources/globalSetup.sql |   6 +-
 .../phoenix/spark/AbstractPhoenixSparkIT.scala |  12 +-
 .../org/apache/phoenix/spark/PhoenixSparkIT.scala  | 541 +++--
 .../spark/PhoenixSparkITTenantSpecific.scala   |  18 +-
 .../spark/datasource/v2/PhoenixDataSource.java |  82 
 .../v2/reader/PhoenixDataSourceReadOptions.java|  51 ++
 .../v2/reader/PhoenixDataSourceReader.java | 201 
 .../v2/reader/PhoenixInputPartition.java   |  44 ++
 .../v2/reader/PhoenixInputPartitionReader.java | 168 +++
 .../v2/writer/PhoenixDataSourceWriteOptions.java   | 109 +
 .../datasource/v2/writer/PhoenixDataWriter.java| 100 
 .../v2/writer/PhoenixDataWriterFactory.java|  19 +
 .../v2/writer/PhoenixDatasourceWriter.java |  34 ++
 ...org.apache.spark.sql.sources.DataSourceRegister |   1 +
 .../apache/phoenix/spark/ConfigurationUtil.scala   |   1 +
 .../apache/phoenix/spark/DataFrameFunctions.scala  |   2 +-
 .../org/apache/phoenix/spark/DefaultSource.scala   |   1 +
 ...lation.scala => FilterExpressionCompiler.scala} | 109 ++---
 .../org/apache/phoenix/spark/PhoenixRDD.scala  |  61 +--
 .../phoenix/spark/PhoenixRecordWritable.scala  |   2 +-
 .../org/apache/phoenix/spark/PhoenixRelation.scala |  70 +--
 .../apache/phoenix/spark/ProductRDDFunctions.scala |   1 +
 .../phoenix/spark/SparkContextFunctions.scala  |   1 +
 .../org/apache/phoenix/spark/SparkSchemaUtil.scala |  84 
 .../phoenix/spark/SparkSqlContextFunctions.scala   |   1 +
 .../datasources/jdbc/PhoenixJdbcDialect.scala  |  21 +
 .../execution/datasources/jdbc/SparkJdbcUtil.scala | 309 
 pom.xml|   2 +-
 32 files changed, 1655 insertions(+), 527 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
index 3051cd6..ef127ac 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
@@ -194,7 +194,7 @@ public abstract class BaseSaltedTableIT extends 
ParallelStatsDisabledIT  {
 .setSelectColumns(
 Lists.newArrayList("A_INTEGER", "A_STRING", "A_ID", 
"B_STRING", "B_INTEGER"))
 .setFullTableName(tableName)
-.setWhereClause("a_integer = 1 AND a_string >= 'ab' AND 
a_string < 'de' AND a_id = '123'");
+.setWhereClause("A_INTEGER = 1 AND A_STRING >= 'ab' AND 
A_STRING < 'de' AND A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 assertTrue(rs.next());
 assertEquals(1, rs.getInt(1));
@@ -205,7 +205,7 @@ public abstract class BaseSaltedTableIT extends 
ParallelStatsDisabledIT  {
 assertFalse(rs.next());
 
 // all single slots with one value.
-queryBuilder.setWhereClause("a_integer = 1 AND a_string = 'ab' AND 
a_id = '123'");
+queryBuilder.setWhereClause("A_INTEGER = 1 AND A_STRING = 'ab' AND 
A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 assertTrue(rs.next());
 assertEquals(1, rs.getInt(1));
@@ -216,7 +216,7 @@ public abstract class BaseSaltedTableIT extends 
ParallelStatsDisabledIT  {
 assertFalse(rs.next());
 
 // all single slots with multiple values.
-queryBuilder.setWhereClause("a_integer in (2, 4) AND a_string = 
'abc' AND a_id = '123'");
+queryBuilder.setWhereClause("A_INTEGER in (2, 4) AND A_STRING = 
'abc' AND A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 
 assertTrue(rs.next());
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 038e314..f426c83

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5059 Use the Datasource v2 api in the spark connector

2019-01-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 7138572  PHOENIX-5059 Use the Datasource v2 api in the spark connector
7138572 is described below

commit 71385723ee08f1cd68269f9770e60847b6f8a1fc
Author: Thomas D'Silva 
AuthorDate: Tue Dec 11 14:59:39 2018 -0800

PHOENIX-5059 Use the Datasource v2 api in the spark connector
---
 .../phoenix/end2end/salted/BaseSaltedTableIT.java  |   6 +-
 phoenix-spark/pom.xml  |   8 +
 .../java/org/apache/phoenix/spark/OrderByIT.java   |  92 ++--
 .../java/org/apache/phoenix/spark/SparkUtil.java   |  25 +-
 phoenix-spark/src/it/resources/globalSetup.sql |   6 +-
 .../phoenix/spark/AbstractPhoenixSparkIT.scala |  12 +-
 .../org/apache/phoenix/spark/PhoenixSparkIT.scala  | 541 +++--
 .../spark/PhoenixSparkITTenantSpecific.scala   |  18 +-
 .../spark/datasource/v2/PhoenixDataSource.java |  82 
 .../v2/reader/PhoenixDataSourceReadOptions.java|  51 ++
 .../v2/reader/PhoenixDataSourceReader.java | 201 
 .../v2/reader/PhoenixInputPartition.java   |  44 ++
 .../v2/reader/PhoenixInputPartitionReader.java | 168 +++
 .../v2/writer/PhoenixDataSourceWriteOptions.java   | 109 +
 .../datasource/v2/writer/PhoenixDataWriter.java| 100 
 .../v2/writer/PhoenixDataWriterFactory.java|  19 +
 .../v2/writer/PhoenixDatasourceWriter.java |  34 ++
 ...org.apache.spark.sql.sources.DataSourceRegister |   1 +
 .../apache/phoenix/spark/ConfigurationUtil.scala   |   1 +
 .../apache/phoenix/spark/DataFrameFunctions.scala  |   2 +-
 .../org/apache/phoenix/spark/DefaultSource.scala   |   1 +
 ...lation.scala => FilterExpressionCompiler.scala} | 109 ++---
 .../org/apache/phoenix/spark/PhoenixRDD.scala  |  61 +--
 .../phoenix/spark/PhoenixRecordWritable.scala  |   2 +-
 .../org/apache/phoenix/spark/PhoenixRelation.scala |  70 +--
 .../apache/phoenix/spark/ProductRDDFunctions.scala |   1 +
 .../phoenix/spark/SparkContextFunctions.scala  |   1 +
 .../org/apache/phoenix/spark/SparkSchemaUtil.scala |  84 
 .../phoenix/spark/SparkSqlContextFunctions.scala   |   1 +
 .../datasources/jdbc/PhoenixJdbcDialect.scala  |  21 +
 .../execution/datasources/jdbc/SparkJdbcUtil.scala | 309 
 pom.xml|   2 +-
 32 files changed, 1655 insertions(+), 527 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
index 3051cd6..ef127ac 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
@@ -194,7 +194,7 @@ public abstract class BaseSaltedTableIT extends 
ParallelStatsDisabledIT  {
 .setSelectColumns(
 Lists.newArrayList("A_INTEGER", "A_STRING", "A_ID", 
"B_STRING", "B_INTEGER"))
 .setFullTableName(tableName)
-.setWhereClause("a_integer = 1 AND a_string >= 'ab' AND 
a_string < 'de' AND a_id = '123'");
+.setWhereClause("A_INTEGER = 1 AND A_STRING >= 'ab' AND 
A_STRING < 'de' AND A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 assertTrue(rs.next());
 assertEquals(1, rs.getInt(1));
@@ -205,7 +205,7 @@ public abstract class BaseSaltedTableIT extends 
ParallelStatsDisabledIT  {
 assertFalse(rs.next());
 
 // all single slots with one value.
-queryBuilder.setWhereClause("a_integer = 1 AND a_string = 'ab' AND 
a_id = '123'");
+queryBuilder.setWhereClause("A_INTEGER = 1 AND A_STRING = 'ab' AND 
A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 assertTrue(rs.next());
 assertEquals(1, rs.getInt(1));
@@ -216,7 +216,7 @@ public abstract class BaseSaltedTableIT extends 
ParallelStatsDisabledIT  {
 assertFalse(rs.next());
 
 // all single slots with multiple values.
-queryBuilder.setWhereClause("a_integer in (2, 4) AND a_string = 
'abc' AND a_id = '123'");
+queryBuilder.setWhereClause("A_INTEGER in (2, 4) AND A_STRING = 
'abc' AND A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 
 assertTrue(rs.next());
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 789f688..a7c1a4f

[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-5059 Use the Datasource v2 api in the spark connector

2019-01-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new 4eaa1b8  PHOENIX-5059 Use the Datasource v2 api in the spark connector
4eaa1b8 is described below

commit 4eaa1b84c5a88385e831f9c4136995e8e2e959a2
Author: Thomas D'Silva 
AuthorDate: Tue Dec 11 14:59:39 2018 -0800

PHOENIX-5059 Use the Datasource v2 api in the spark connector
---
 .../phoenix/end2end/salted/BaseSaltedTableIT.java  |   6 +-
 phoenix-spark/pom.xml  |   8 +
 .../java/org/apache/phoenix/spark/OrderByIT.java   |  92 ++--
 .../java/org/apache/phoenix/spark/SparkUtil.java   |  25 +-
 phoenix-spark/src/it/resources/globalSetup.sql |   6 +-
 .../phoenix/spark/AbstractPhoenixSparkIT.scala |  12 +-
 .../org/apache/phoenix/spark/PhoenixSparkIT.scala  | 541 +++--
 .../spark/PhoenixSparkITTenantSpecific.scala   |  18 +-
 .../spark/datasource/v2/PhoenixDataSource.java |  82 
 .../v2/reader/PhoenixDataSourceReadOptions.java|  51 ++
 .../v2/reader/PhoenixDataSourceReader.java | 201 
 .../v2/reader/PhoenixInputPartition.java   |  44 ++
 .../v2/reader/PhoenixInputPartitionReader.java | 168 +++
 .../v2/writer/PhoenixDataSourceWriteOptions.java   | 109 +
 .../datasource/v2/writer/PhoenixDataWriter.java| 100 
 .../v2/writer/PhoenixDataWriterFactory.java|  19 +
 .../v2/writer/PhoenixDatasourceWriter.java |  34 ++
 ...org.apache.spark.sql.sources.DataSourceRegister |   1 +
 .../apache/phoenix/spark/ConfigurationUtil.scala   |   1 +
 .../apache/phoenix/spark/DataFrameFunctions.scala  |   2 +-
 .../org/apache/phoenix/spark/DefaultSource.scala   |   1 +
 ...lation.scala => FilterExpressionCompiler.scala} | 109 ++---
 .../org/apache/phoenix/spark/PhoenixRDD.scala  |  61 +--
 .../phoenix/spark/PhoenixRecordWritable.scala  |   2 +-
 .../org/apache/phoenix/spark/PhoenixRelation.scala |  70 +--
 .../apache/phoenix/spark/ProductRDDFunctions.scala |   1 +
 .../phoenix/spark/SparkContextFunctions.scala  |   1 +
 .../org/apache/phoenix/spark/SparkSchemaUtil.scala |  84 
 .../phoenix/spark/SparkSqlContextFunctions.scala   |   1 +
 .../datasources/jdbc/PhoenixJdbcDialect.scala  |  21 +
 .../execution/datasources/jdbc/SparkJdbcUtil.scala | 309 
 pom.xml|   2 +-
 32 files changed, 1655 insertions(+), 527 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
index 3051cd6..ef127ac 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
@@ -194,7 +194,7 @@ public abstract class BaseSaltedTableIT extends 
ParallelStatsDisabledIT  {
 .setSelectColumns(
 Lists.newArrayList("A_INTEGER", "A_STRING", "A_ID", 
"B_STRING", "B_INTEGER"))
 .setFullTableName(tableName)
-.setWhereClause("a_integer = 1 AND a_string >= 'ab' AND 
a_string < 'de' AND a_id = '123'");
+.setWhereClause("A_INTEGER = 1 AND A_STRING >= 'ab' AND 
A_STRING < 'de' AND A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 assertTrue(rs.next());
 assertEquals(1, rs.getInt(1));
@@ -205,7 +205,7 @@ public abstract class BaseSaltedTableIT extends 
ParallelStatsDisabledIT  {
 assertFalse(rs.next());
 
 // all single slots with one value.
-queryBuilder.setWhereClause("a_integer = 1 AND a_string = 'ab' AND 
a_id = '123'");
+queryBuilder.setWhereClause("A_INTEGER = 1 AND A_STRING = 'ab' AND 
A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 assertTrue(rs.next());
 assertEquals(1, rs.getInt(1));
@@ -216,7 +216,7 @@ public abstract class BaseSaltedTableIT extends 
ParallelStatsDisabledIT  {
 assertFalse(rs.next());
 
 // all single slots with multiple values.
-queryBuilder.setWhereClause("a_integer in (2, 4) AND a_string = 
'abc' AND a_id = '123'");
+queryBuilder.setWhereClause("A_INTEGER in (2, 4) AND A_STRING = 
'abc' AND A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 
 assertTrue(rs.next());
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index c85fe76..08b3806

phoenix git commit: PHOENIX-5074 DropTableWithViewsIT.testDropTableWithChildViews is flapping

2018-12-20 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 255379d2c -> 46cdaf62c


PHOENIX-5074 DropTableWithViewsIT.testDropTableWithChildViews is flapping


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/46cdaf62
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/46cdaf62
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/46cdaf62

Branch: refs/heads/4.x-HBase-1.4
Commit: 46cdaf62c6d6f0c990865f15c70aa717c6efd9aa
Parents: 255379d
Author: Kadir 
Authored: Thu Dec 20 11:38:44 2018 -0800
Committer: Thomas D'Silva 
Committed: Thu Dec 20 14:21:12 2018 -0800

--
 .../phoenix/end2end/DropTableWithViewsIT.java   | 56 +++-
 1 file changed, 30 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/46cdaf62/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
index 9502218..a4cd354 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
@@ -20,7 +20,6 @@ package org.apache.phoenix.end2end;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -30,14 +29,16 @@ import java.util.Collection;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.phoenix.coprocessor.TableViewFinderResult;
+import org.apache.phoenix.coprocessor.TaskRegionObserver;
 import org.apache.phoenix.coprocessor.ViewFinder;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.util.SchemaUtil;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -50,6 +51,20 @@ public class DropTableWithViewsIT extends 
SplitSystemCatalogIT {
 private final boolean columnEncoded;
 private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + 
TENANT_ID_ATTRIB + "=" + TENANT1;
 
+private static RegionCoprocessorEnvironment TaskRegionEnvironment;
+
+@BeforeClass
+public static void doSetup() throws Exception {
+SplitSystemCatalogIT.doSetup();
+TaskRegionEnvironment =
+getUtility()
+.getRSForFirstRegionInTable(
+
PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
+
.getRegions(PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
+.get(0).getCoprocessorHost()
+
.findCoprocessorEnvironment(TaskRegionObserver.class.getName());
+}
+
 public DropTableWithViewsIT(boolean isMultiTenant, boolean columnEncoded) {
 this.isMultiTenant = isMultiTenant;
 this.columnEncoded = columnEncoded;
@@ -108,30 +123,19 @@ public class DropTableWithViewsIT extends 
SplitSystemCatalogIT {
 // Drop the base table
 String dropTable = String.format("DROP TABLE IF EXISTS %s 
CASCADE", baseTable);
 conn.createStatement().execute(dropTable);
-
-// Wait for the tasks for dropping child views to complete. The 
depth of the view tree is 2, so we expect that
-// this will be done in two task handling runs, i.e., in tree task 
handling interval at most in general
-// by assuming that each non-root level will be processed in one 
interval. To be on the safe side, we will
-// wait at most 10 intervals.
-long halfTimeInterval = 
config.getLong(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB,
-QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS)/2;
-ResultSet rs = null;
-boolean timedOut = true;
-Thread.sleep(3 * halfTimeInterval);
-for (int i = 3; i < 20; i++) {
-rs = conn.createStatement().executeQuery("SELECT * " +
-" FROM " + 
PhoenixDatabaseMetaData.SYSTEM_TASK_NAME +
-" WHERE " + PhoenixDatabaseMetaData.TASK_TYPE 
+ " = " +
-
PTable.TaskType.DROP_CHILD_VIEWS.getSerializedValue());
-  

phoenix git commit: PHOENIX-5074 DropTableWithViewsIT.testDropTableWithChildViews is flapping

2018-12-20 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 e7ce8e815 -> 43f7687fb


PHOENIX-5074 DropTableWithViewsIT.testDropTableWithChildViews is flapping


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/43f7687f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/43f7687f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/43f7687f

Branch: refs/heads/4.x-HBase-1.2
Commit: 43f7687fb328ecd19333bbb514fe9672d8b39b9e
Parents: e7ce8e8
Author: Kadir 
Authored: Thu Dec 20 11:38:44 2018 -0800
Committer: Thomas D'Silva 
Committed: Thu Dec 20 14:20:56 2018 -0800

--
 .../phoenix/end2end/DropTableWithViewsIT.java   | 56 +++-
 1 file changed, 30 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/43f7687f/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
index 9502218..a4cd354 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
@@ -20,7 +20,6 @@ package org.apache.phoenix.end2end;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -30,14 +29,16 @@ import java.util.Collection;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.phoenix.coprocessor.TableViewFinderResult;
+import org.apache.phoenix.coprocessor.TaskRegionObserver;
 import org.apache.phoenix.coprocessor.ViewFinder;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.util.SchemaUtil;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -50,6 +51,20 @@ public class DropTableWithViewsIT extends 
SplitSystemCatalogIT {
 private final boolean columnEncoded;
 private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + 
TENANT_ID_ATTRIB + "=" + TENANT1;
 
+private static RegionCoprocessorEnvironment TaskRegionEnvironment;
+
+@BeforeClass
+public static void doSetup() throws Exception {
+SplitSystemCatalogIT.doSetup();
+TaskRegionEnvironment =
+getUtility()
+.getRSForFirstRegionInTable(
+
PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
+
.getRegions(PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
+.get(0).getCoprocessorHost()
+
.findCoprocessorEnvironment(TaskRegionObserver.class.getName());
+}
+
 public DropTableWithViewsIT(boolean isMultiTenant, boolean columnEncoded) {
 this.isMultiTenant = isMultiTenant;
 this.columnEncoded = columnEncoded;
@@ -108,30 +123,19 @@ public class DropTableWithViewsIT extends 
SplitSystemCatalogIT {
 // Drop the base table
 String dropTable = String.format("DROP TABLE IF EXISTS %s 
CASCADE", baseTable);
 conn.createStatement().execute(dropTable);
-
-// Wait for the tasks for dropping child views to complete. The 
depth of the view tree is 2, so we expect that
-// this will be done in two task handling runs, i.e., in tree task 
handling interval at most in general
-// by assuming that each non-root level will be processed in one 
interval. To be on the safe side, we will
-// wait at most 10 intervals.
-long halfTimeInterval = 
config.getLong(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB,
-QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS)/2;
-ResultSet rs = null;
-boolean timedOut = true;
-Thread.sleep(3 * halfTimeInterval);
-for (int i = 3; i < 20; i++) {
-rs = conn.createStatement().executeQuery("SELECT * " +
-" FROM " + 
PhoenixDatabaseMetaData.SYSTEM_TASK_NAME +
-" WHERE " + PhoenixDatabaseMetaData.TASK_TYPE 
+ " = " +
-
PTable.TaskType.DROP_CHILD_VIEWS.getSerializedValue());
-  

phoenix git commit: PHOENIX-5074 DropTableWithViewsIT.testDropTableWithChildViews is flapping

2018-12-20 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/master 412e07891 -> 57509506d


PHOENIX-5074 DropTableWithViewsIT.testDropTableWithChildViews is flapping


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/57509506
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/57509506
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/57509506

Branch: refs/heads/master
Commit: 57509506dd64f67265473ac9daa30d9756e211d6
Parents: 412e078
Author: Kadir 
Authored: Thu Dec 20 11:38:44 2018 -0800
Committer: Thomas D'Silva 
Committed: Thu Dec 20 14:20:40 2018 -0800

--
 .../phoenix/end2end/DropTableWithViewsIT.java   | 56 +++-
 1 file changed, 30 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/57509506/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
index 9502218..a4cd354 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
@@ -20,7 +20,6 @@ package org.apache.phoenix.end2end;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -30,14 +29,16 @@ import java.util.Collection;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.phoenix.coprocessor.TableViewFinderResult;
+import org.apache.phoenix.coprocessor.TaskRegionObserver;
 import org.apache.phoenix.coprocessor.ViewFinder;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.util.SchemaUtil;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -50,6 +51,20 @@ public class DropTableWithViewsIT extends 
SplitSystemCatalogIT {
 private final boolean columnEncoded;
 private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + 
TENANT_ID_ATTRIB + "=" + TENANT1;
 
+private static RegionCoprocessorEnvironment TaskRegionEnvironment;
+
+@BeforeClass
+public static void doSetup() throws Exception {
+SplitSystemCatalogIT.doSetup();
+TaskRegionEnvironment =
+getUtility()
+.getRSForFirstRegionInTable(
+
PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
+
.getRegions(PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
+.get(0).getCoprocessorHost()
+
.findCoprocessorEnvironment(TaskRegionObserver.class.getName());
+}
+
 public DropTableWithViewsIT(boolean isMultiTenant, boolean columnEncoded) {
 this.isMultiTenant = isMultiTenant;
 this.columnEncoded = columnEncoded;
@@ -108,30 +123,19 @@ public class DropTableWithViewsIT extends 
SplitSystemCatalogIT {
 // Drop the base table
 String dropTable = String.format("DROP TABLE IF EXISTS %s 
CASCADE", baseTable);
 conn.createStatement().execute(dropTable);
-
-// Wait for the tasks for dropping child views to complete. The 
depth of the view tree is 2, so we expect that
-// this will be done in two task handling runs, i.e., in tree task 
handling interval at most in general
-// by assuming that each non-root level will be processed in one 
interval. To be on the safe side, we will
-// wait at most 10 intervals.
-long halfTimeInterval = 
config.getLong(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB,
-QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS)/2;
-ResultSet rs = null;
-boolean timedOut = true;
-Thread.sleep(3 * halfTimeInterval);
-for (int i = 3; i < 20; i++) {
-rs = conn.createStatement().executeQuery("SELECT * " +
-" FROM " + 
PhoenixDatabaseMetaData.SYSTEM_TASK_NAME +
-" WHERE " + PhoenixDatabaseMetaData.TASK_TYPE 
+ " = " +
-
PTable.TaskType.DROP_CHILD_VIEWS.getSerializedValue());
-

phoenix git commit: PHOENIX-5074 DropTableWithViewsIT.testDropTableWithChildViews is flapping

2018-12-20 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 850114334 -> 214274fc3


PHOENIX-5074 DropTableWithViewsIT.testDropTableWithChildViews is flapping


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/214274fc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/214274fc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/214274fc

Branch: refs/heads/4.x-HBase-1.3
Commit: 214274fc3d05db47ad184b0f288e2a6f21d30d1f
Parents: 8501143
Author: Kadir 
Authored: Thu Dec 20 11:38:44 2018 -0800
Committer: Thomas D'Silva 
Committed: Thu Dec 20 14:21:03 2018 -0800

--
 .../phoenix/end2end/DropTableWithViewsIT.java   | 56 +++-
 1 file changed, 30 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/214274fc/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
index 9502218..a4cd354 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
@@ -20,7 +20,6 @@ package org.apache.phoenix.end2end;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -30,14 +29,16 @@ import java.util.Collection;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.phoenix.coprocessor.TableViewFinderResult;
+import org.apache.phoenix.coprocessor.TaskRegionObserver;
 import org.apache.phoenix.coprocessor.ViewFinder;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.util.SchemaUtil;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -50,6 +51,20 @@ public class DropTableWithViewsIT extends 
SplitSystemCatalogIT {
 private final boolean columnEncoded;
 private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + 
TENANT_ID_ATTRIB + "=" + TENANT1;
 
+private static RegionCoprocessorEnvironment TaskRegionEnvironment;
+
+@BeforeClass
+public static void doSetup() throws Exception {
+SplitSystemCatalogIT.doSetup();
+TaskRegionEnvironment =
+getUtility()
+.getRSForFirstRegionInTable(
+
PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
+
.getRegions(PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
+.get(0).getCoprocessorHost()
+
.findCoprocessorEnvironment(TaskRegionObserver.class.getName());
+}
+
 public DropTableWithViewsIT(boolean isMultiTenant, boolean columnEncoded) {
 this.isMultiTenant = isMultiTenant;
 this.columnEncoded = columnEncoded;
@@ -108,30 +123,19 @@ public class DropTableWithViewsIT extends 
SplitSystemCatalogIT {
 // Drop the base table
 String dropTable = String.format("DROP TABLE IF EXISTS %s 
CASCADE", baseTable);
 conn.createStatement().execute(dropTable);
-
-// Wait for the tasks for dropping child views to complete. The 
depth of the view tree is 2, so we expect that
-// this will be done in two task handling runs, i.e., in tree task 
handling interval at most in general
-// by assuming that each non-root level will be processed in one 
interval. To be on the safe side, we will
-// wait at most 10 intervals.
-long halfTimeInterval = 
config.getLong(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB,
-QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS)/2;
-ResultSet rs = null;
-boolean timedOut = true;
-Thread.sleep(3 * halfTimeInterval);
-for (int i = 3; i < 20; i++) {
-rs = conn.createStatement().executeQuery("SELECT * " +
-" FROM " + 
PhoenixDatabaseMetaData.SYSTEM_TASK_NAME +
-" WHERE " + PhoenixDatabaseMetaData.TASK_TYPE 
+ " = " +
-
PTable.TaskType.DROP_CHILD_VIEWS.getSerializedValue());
-  

phoenix git commit: PHOENIX-4983: Added missing apache license header.

2018-12-17 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/master 71946ed27 -> ad4467463


PHOENIX-4983: Added missing apache license header.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ad446746
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ad446746
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ad446746

Branch: refs/heads/master
Commit: ad4467463a86c99ea5883d11543659737d5c16b7
Parents: 71946ed
Author: s.kadam 
Authored: Fri Dec 14 11:04:29 2018 -0500
Committer: Thomas D'Silva 
Committed: Mon Dec 17 10:53:44 2018 -0800

--
 .../apache/phoenix/end2end/UpsertWithSCNIT.java| 17 +
 1 file changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ad446746/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
index 6f231ff..40bb883 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.end2end;
 
 import org.apache.phoenix.exception.SQLExceptionCode;



phoenix git commit: PHOENIX-4983: Added missing apache license header.

2018-12-17 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 eb3c3db92 -> 820d76a6b


PHOENIX-4983: Added missing apache license header.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/820d76a6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/820d76a6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/820d76a6

Branch: refs/heads/4.x-HBase-1.3
Commit: 820d76a6b258179f0e6db5264606bc483348506c
Parents: eb3c3db
Author: s.kadam 
Authored: Fri Dec 14 11:04:29 2018 -0500
Committer: Thomas D'Silva 
Committed: Mon Dec 17 10:53:18 2018 -0800

--
 .../apache/phoenix/end2end/UpsertWithSCNIT.java| 17 +
 1 file changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/820d76a6/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
index 6f231ff..40bb883 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.end2end;
 
 import org.apache.phoenix.exception.SQLExceptionCode;



phoenix git commit: PHOENIX-4983: Added missing apache license header.

2018-12-17 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 ed6ff9b32 -> 32b693130


PHOENIX-4983: Added missing apache license header.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/32b69313
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/32b69313
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/32b69313

Branch: refs/heads/4.x-HBase-1.4
Commit: 32b6931303ee433ca53c4bafd98d0faeaf7d2b56
Parents: ed6ff9b
Author: s.kadam 
Authored: Fri Dec 14 11:04:29 2018 -0500
Committer: Thomas D'Silva 
Committed: Mon Dec 17 10:53:30 2018 -0800

--
 .../apache/phoenix/end2end/UpsertWithSCNIT.java| 17 +
 1 file changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/32b69313/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
index 6f231ff..40bb883 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.end2end;
 
 import org.apache.phoenix.exception.SQLExceptionCode;



phoenix git commit: PHOENIX-4983: Added missing apache license header.

2018-12-17 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 67b568395 -> e85a27fd3


PHOENIX-4983: Added missing apache license header.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e85a27fd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e85a27fd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e85a27fd

Branch: refs/heads/4.x-HBase-1.2
Commit: e85a27fd3f69176b42a6c556001ec4f8529a5794
Parents: 67b5683
Author: s.kadam 
Authored: Fri Dec 14 11:04:29 2018 -0500
Committer: Thomas D'Silva 
Committed: Mon Dec 17 10:53:03 2018 -0800

--
 .../apache/phoenix/end2end/UpsertWithSCNIT.java| 17 +
 1 file changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e85a27fd/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
index 6f231ff..40bb883 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.end2end;
 
 import org.apache.phoenix.exception.SQLExceptionCode;



phoenix git commit: PHOENIX-4983: Allow using a connection with a SCN set to write data to tables EXCEPT transactional tables or mutable tables with indexes or tables with ROW_TIMESTAMP column.

2018-12-11 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 411e44849 -> 44bc4fc22


PHOENIX-4983: Allow using a connection with a SCN set to write data to tables 
EXCEPT transactional tables or mutable tables with indexes or tables with 
ROW_TIMESTAMP column.
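
A minimal client-side sketch of the behavior this enables, distilled from the test below (the JDBC URL and table name are placeholders):

    Properties props = new Properties();
    // Pin all writes on this connection to an explicit timestamp (SCN):
    props.setProperty("CurrentSCN", Long.toString(System.currentTimeMillis()));
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
        conn.setAutoCommit(true);
        try (PreparedStatement ps = conn.prepareStatement(
                "UPSERT INTO METRICS (METRIC_ID, METRIC_VALUE) VALUES (?, ?)")) {
            ps.setString(1, "abc");
            ps.setString(2, "written at the SCN above");
            // Succeeds for plain tables; throws SQLException for transactional
            // tables, mutable tables with indexes, or ROW_TIMESTAMP tables.
            ps.executeUpdate();
        }
    }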


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/44bc4fc2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/44bc4fc2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/44bc4fc2

Branch: refs/heads/4.x-HBase-1.3
Commit: 44bc4fc22a9de76357b77faa35230ec5a7684cc8
Parents: 411e448
Author: s.kadam 
Authored: Mon Dec 10 14:40:17 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Dec 11 17:04:21 2018 -0800

--
 .../apache/phoenix/end2end/UpsertWithSCNIT.java | 139 +++
 .../apache/phoenix/compile/UpsertCompiler.java  |  23 ++-
 .../phoenix/exception/SQLExceptionCode.java |  13 +-
 .../apache/phoenix/jdbc/PhoenixConnection.java  |   2 +-
 4 files changed, 172 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/44bc4fc2/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
new file mode 100644
index 000..6f231ff
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
@@ -0,0 +1,139 @@
+package org.apache.phoenix.end2end;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Properties;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+
+public class UpsertWithSCNIT extends ParallelStatsDisabledIT {
+
+@Rule
+public final ExpectedException exception = ExpectedException.none();
+Properties props = null;
+PreparedStatement prep = null;
+String tableName =null;
+
+private void helpTestUpserWithSCNIT(boolean rowColumn, boolean txTable,
+boolean mutable, boolean local, 
boolean global)
+throws SQLException {
+
+tableName = generateUniqueName();
+String indx;
+String createTable = "CREATE TABLE "+tableName+" ("
++ (rowColumn ? "CREATED_DATE DATE NOT NULL, ":"")
++ "METRIC_ID CHAR(15) NOT NULL,METRIC_VALUE VARCHAR(50) 
CONSTRAINT PK PRIMARY KEY("
++ (rowColumn? "CREATED_DATE ROW_TIMESTAMP, ":"") + 
"METRIC_ID)) "
++ (mutable? "IMMUTABLE_ROWS=false":"" )
++ (txTable ? 
"TRANSACTION_PROVIDER='TEPHRA',TRANSACTIONAL=true":"");
+props = new Properties();
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.createStatement().execute(createTable);
+
+if(local || global ){
+indx = "CREATE "+ (local? "LOCAL " : "") + "INDEX 
"+tableName+"_idx ON " +
+""+tableName+" (METRIC_VALUE)";
+conn.createStatement().execute(indx);
+}
+
+props.setProperty("CurrentSCN", 
Long.toString(System.currentTimeMillis()));
+conn = DriverManager.getConnection(getUrl(), props);
+conn.setAutoCommit(true);
+String upsert = "UPSERT INTO "+tableName+" (METRIC_ID, METRIC_VALUE) 
VALUES (?,?)";
+prep = conn.prepareStatement(upsert);
+prep.setString(1,"abc");
+prep.setString(2,"This is the first comment!");
+}
+
+@Test // See https://issues.apache.org/jira/browse/PHOENIX-4983
+public void testUpsertOnSCNSetTxnTable() throws SQLException {
+
+helpTestUpserWithSCNIT(false, true, false, false, false);
+exception.expect(SQLException.class);
+exception.expectMessage(containsString(String.valueOf(
+SQLExceptionCode
+.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE
+.getErrorCode())));
+prep.executeUpdate();
+}
+
+@Test
+public void testUpsertOnSCNSetMutTableWithoutIdx() throws Exception {
+
+helpTestUpserWithSCNIT(false, false, true, false, false);
+prep.executeUpdate();
+props = new Properties();
+Connection conn = DriverManager.getConnectio

phoenix git commit: PHOENIX-4983: Allow using a connection with a SCN set to write data to tables EXCEPT transactional tables or mutable tables with indexes or tables with ROW_TIMESTAMP column.

2018-12-11 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/master f0881a137 -> 1c042c25e


PHOENIX-4983: Allow using a connection with a SCN set to write data to tables 
EXCEPT transactional tables or mutable tables with indexes or tables with 
ROW_TIMESTAMP column.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1c042c25
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1c042c25
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1c042c25

Branch: refs/heads/master
Commit: 1c042c25ed833945abe07958b82d5e9fb701ac89
Parents: f0881a1
Author: s.kadam 
Authored: Mon Dec 10 14:40:17 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Dec 11 17:04:53 2018 -0800

--
 .../apache/phoenix/end2end/UpsertWithSCNIT.java | 139 +++
 .../apache/phoenix/compile/UpsertCompiler.java  |  23 ++-
 .../phoenix/exception/SQLExceptionCode.java |  13 +-
 .../apache/phoenix/jdbc/PhoenixConnection.java  |   2 +-
 4 files changed, 172 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c042c25/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
new file mode 100644
index 000..6f231ff
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
@@ -0,0 +1,139 @@
+package org.apache.phoenix.end2end;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Properties;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+
+public class UpsertWithSCNIT extends ParallelStatsDisabledIT {
+
+@Rule
+public final ExpectedException exception = ExpectedException.none();
+Properties props = null;
+PreparedStatement prep = null;
+String tableName =null;
+
+private void helpTestUpserWithSCNIT(boolean rowColumn, boolean txTable,
+boolean mutable, boolean local, 
boolean global)
+throws SQLException {
+
+tableName = generateUniqueName();
+String indx;
+String createTable = "CREATE TABLE "+tableName+" ("
++ (rowColumn ? "CREATED_DATE DATE NOT NULL, ":"")
++ "METRIC_ID CHAR(15) NOT NULL,METRIC_VALUE VARCHAR(50) 
CONSTRAINT PK PRIMARY KEY("
++ (rowColumn? "CREATED_DATE ROW_TIMESTAMP, ":"") + 
"METRIC_ID)) "
++ (mutable? "IMMUTABLE_ROWS=false":"" )
++ (txTable ? 
"TRANSACTION_PROVIDER='TEPHRA',TRANSACTIONAL=true":"");
+props = new Properties();
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.createStatement().execute(createTable);
+
+if(local || global ){
+indx = "CREATE "+ (local? "LOCAL " : "") + "INDEX 
"+tableName+"_idx ON " +
+""+tableName+" (METRIC_VALUE)";
+conn.createStatement().execute(indx);
+}
+
+props.setProperty("CurrentSCN", 
Long.toString(System.currentTimeMillis()));
+conn = DriverManager.getConnection(getUrl(), props);
+conn.setAutoCommit(true);
+String upsert = "UPSERT INTO "+tableName+" (METRIC_ID, METRIC_VALUE) 
VALUES (?,?)";
+prep = conn.prepareStatement(upsert);
+prep.setString(1,"abc");
+prep.setString(2,"This is the first comment!");
+}
+
+@Test // See https://issues.apache.org/jira/browse/PHOENIX-4983
+public void testUpsertOnSCNSetTxnTable() throws SQLException {
+
+helpTestUpserWithSCNIT(false, true, false, false, false);
+exception.expect(SQLException.class);
+exception.expectMessage(containsString(String.valueOf(
+SQLExceptionCode
+.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE
+.getErrorCode())));
+prep.executeUpdate();
+}
+
+@Test
+public void testUpsertOnSCNSetMutTableWithoutIdx() throws Exception {
+
+helpTestUpserWithSCNIT(false, false, true, false, false);
+prep.executeUpdate();
+props = new Properties();
+Connection conn = DriverManager.getConnection(getUrl(),pro

phoenix git commit: PHOENIX-4983: Allow using a connection with a SCN set to write data to tables EXCEPT transactional tables or mutable tables with indexes or tables with ROW_TIMESTAMP column.

2018-12-11 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 e88051a3e -> c634bf743


PHOENIX-4983: Allow using a connection with a SCN set to write data to tables 
EXCEPT transactional tables or mutable tables with indexes or tables with 
ROW_TIMESTAMP column.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c634bf74
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c634bf74
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c634bf74

Branch: refs/heads/4.x-HBase-1.4
Commit: c634bf743632383a5dc369b5a9f08968949c8bfc
Parents: e88051a
Author: s.kadam 
Authored: Mon Dec 10 14:40:17 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Dec 11 17:04:35 2018 -0800

--
 .../apache/phoenix/end2end/UpsertWithSCNIT.java | 139 +++
 .../apache/phoenix/compile/UpsertCompiler.java  |  23 ++-
 .../phoenix/exception/SQLExceptionCode.java |  13 +-
 .../apache/phoenix/jdbc/PhoenixConnection.java  |   2 +-
 4 files changed, 172 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c634bf74/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
new file mode 100644
index 000..6f231ff
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
@@ -0,0 +1,139 @@
+package org.apache.phoenix.end2end;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Properties;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+
+public class UpsertWithSCNIT extends ParallelStatsDisabledIT {
+
+@Rule
+public final ExpectedException exception = ExpectedException.none();
+Properties props = null;
+PreparedStatement prep = null;
+String tableName =null;
+
+private void helpTestUpserWithSCNIT(boolean rowColumn, boolean txTable,
+boolean mutable, boolean local, 
boolean global)
+throws SQLException {
+
+tableName = generateUniqueName();
+String indx;
+String createTable = "CREATE TABLE "+tableName+" ("
++ (rowColumn ? "CREATED_DATE DATE NOT NULL, ":"")
++ "METRIC_ID CHAR(15) NOT NULL,METRIC_VALUE VARCHAR(50) 
CONSTRAINT PK PRIMARY KEY("
++ (rowColumn? "CREATED_DATE ROW_TIMESTAMP, ":"") + 
"METRIC_ID)) "
++ (mutable? "IMMUTABLE_ROWS=false":"" )
++ (txTable ? 
"TRANSACTION_PROVIDER='TEPHRA',TRANSACTIONAL=true":"");
+props = new Properties();
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.createStatement().execute(createTable);
+
+if(local || global ){
+indx = "CREATE "+ (local? "LOCAL " : "") + "INDEX 
"+tableName+"_idx ON " +
+""+tableName+" (METRIC_VALUE)";
+conn.createStatement().execute(indx);
+}
+
+props.setProperty("CurrentSCN", 
Long.toString(System.currentTimeMillis()));
+conn = DriverManager.getConnection(getUrl(), props);
+conn.setAutoCommit(true);
+String upsert = "UPSERT INTO "+tableName+" (METRIC_ID, METRIC_VALUE) 
VALUES (?,?)";
+prep = conn.prepareStatement(upsert);
+prep.setString(1,"abc");
+prep.setString(2,"This is the first comment!");
+}
+
+@Test // See https://issues.apache.org/jira/browse/PHOENIX-4983
+public void testUpsertOnSCNSetTxnTable() throws SQLException {
+
+helpTestUpserWithSCNIT(false, true, false, false, false);
+exception.expect(SQLException.class);
+exception.expectMessage(containsString(String.valueOf(
+SQLExceptionCode
+.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE
+.getErrorCode())));
+prep.executeUpdate();
+}
+
+@Test
+public void testUpsertOnSCNSetMutTableWithoutIdx() throws Exception {
+
+helpTestUpserWithSCNIT(false, false, true, false, false);
+prep.executeUpdate();
+props = new Properties();
+Connection conn = DriverManager.getConnectio

phoenix git commit: PHOENIX-4983: Allow using a connection with a SCN set to write data to tables EXCEPT transactional tables or mutable tables with indexes or tables with ROW_TIMESTAMP column.

2018-12-11 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 40cdb7b87 -> f257f5e02


PHOENIX-4983: Allow using a connection with a SCN set to write data to tables 
EXCEPT transactional tables or mutable tables with indexes or tables with 
ROW_TIMESTAMP column.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f257f5e0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f257f5e0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f257f5e0

Branch: refs/heads/4.x-HBase-1.2
Commit: f257f5e02898aecd5a4ae0079f1d63a6549cc6e5
Parents: 40cdb7b
Author: s.kadam 
Authored: Mon Dec 10 14:40:17 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Dec 11 17:04:07 2018 -0800

--
 .../apache/phoenix/end2end/UpsertWithSCNIT.java | 139 +++
 .../apache/phoenix/compile/UpsertCompiler.java  |  23 ++-
 .../phoenix/exception/SQLExceptionCode.java |  13 +-
 .../apache/phoenix/jdbc/PhoenixConnection.java  |   2 +-
 4 files changed, 172 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f257f5e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
new file mode 100644
index 000..6f231ff
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
@@ -0,0 +1,139 @@
+package org.apache.phoenix.end2end;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Properties;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+
+public class UpsertWithSCNIT extends ParallelStatsDisabledIT {
+
+@Rule
+public final ExpectedException exception = ExpectedException.none();
+Properties props = null;
+PreparedStatement prep = null;
+String tableName =null;
+
+private void helpTestUpserWithSCNIT(boolean rowColumn, boolean txTable,
+boolean mutable, boolean local, 
boolean global)
+throws SQLException {
+
+tableName = generateUniqueName();
+String indx;
+String createTable = "CREATE TABLE "+tableName+" ("
++ (rowColumn ? "CREATED_DATE DATE NOT NULL, ":"")
++ "METRIC_ID CHAR(15) NOT NULL,METRIC_VALUE VARCHAR(50) 
CONSTRAINT PK PRIMARY KEY("
++ (rowColumn? "CREATED_DATE ROW_TIMESTAMP, ":"") + 
"METRIC_ID)) "
++ (mutable? "IMMUTABLE_ROWS=false":"" )
++ (txTable ? 
"TRANSACTION_PROVIDER='TEPHRA',TRANSACTIONAL=true":"");
+props = new Properties();
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.createStatement().execute(createTable);
+
+if(local || global ){
+indx = "CREATE "+ (local? "LOCAL " : "") + "INDEX 
"+tableName+"_idx ON " +
+""+tableName+" (METRIC_VALUE)";
+conn.createStatement().execute(indx);
+}
+
+props.setProperty("CurrentSCN", 
Long.toString(System.currentTimeMillis()));
+conn = DriverManager.getConnection(getUrl(), props);
+conn.setAutoCommit(true);
+String upsert = "UPSERT INTO "+tableName+" (METRIC_ID, METRIC_VALUE) 
VALUES (?,?)";
+prep = conn.prepareStatement(upsert);
+prep.setString(1,"abc");
+prep.setString(2,"This is the first comment!");
+}
+
+@Test // See https://issues.apache.org/jira/browse/PHOENIX-4983
+public void testUpsertOnSCNSetTxnTable() throws SQLException {
+
+helpTestUpserWithSCNIT(false, true, false, false, false);
+exception.expect(SQLException.class);
+exception.expectMessage(containsString(String.valueOf(
+SQLExceptionCode
+.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE
+.getErrorCode())));
+prep.executeUpdate();
+}
+
+@Test
+public void testUpsertOnSCNSetMutTableWithoutIdx() throws Exception {
+
+helpTestUpserWithSCNIT(false, false, true, false, false);
+prep.executeUpdate();
+props = new Properties();
+Connection conn = DriverManager.getConnectio

phoenix git commit: ScanningResultIterator metric RowsScanned not set. PHOENIX-5051

2018-12-05 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 8a3295894 -> 7380c74d3


ScanningResultIterator metric RowsScanned not set. PHOENIX-5051
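
For context, the corrected metric surfaces through Phoenix's request-level read metrics. A sketch of reading it client-side, assuming the 4.x PhoenixRuntime.getRequestReadMetrics API and the phoenix.query.request.metrics.enabled switch (both are assumptions, not shown in this commit; URL and table are placeholders):

    Properties props = new Properties();
    props.setProperty("phoenix.query.request.metrics.enabled", "true"); // assumed key
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
            ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM T")) {
        while (rs.next()) { /* consume rows so scan metrics are populated */ }
        // Before this fix the scanned-row count was copied into the
        // rows-filtered slot, so ROWS_SCANNED always read as unset.
        Map<String, Map<MetricType, Long>> metrics =
                PhoenixRuntime.getRequestReadMetrics(rs); // assumed signature
        metrics.forEach((table, values) -> values.forEach(
                (type, value) -> System.out.println(table + " " + type + "=" + value)));
    }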


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7380c74d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7380c74d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7380c74d

Branch: refs/heads/4.x-HBase-1.3
Commit: 7380c74d3fc9a7ec0699b0a1de418bd80aaf2e34
Parents: 8a32958
Author: chfeng 
Authored: Wed Dec 5 10:40:29 2018 +0800
Committer: Thomas D'Silva 
Committed: Wed Dec 5 10:05:14 2018 -0800

--
 .../java/org/apache/phoenix/iterate/ScanningResultIterator.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7380c74d/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index f02e9d3..893eaa2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -117,7 +117,7 @@ public class ScanningResultIterator implements 
ResultIterator {
 scanMetricsMap.get(RPC_RETRIES_METRIC_NAME));
 changeMetric(scanMetricsHolder.getCountOfRemoteRPCRetries(),
 scanMetricsMap.get(REMOTE_RPC_RETRIES_METRIC_NAME));
-changeMetric(scanMetricsHolder.getCountOfRowsFiltered(),
+changeMetric(scanMetricsHolder.getCountOfRowsScanned(),
 scanMetricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME));
 changeMetric(scanMetricsHolder.getCountOfRowsFiltered(),
 
scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME));



phoenix git commit: ScanningResultIterator metric RowsScanned not set. PHOENIX-5051

2018-12-05 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 9d8bdd687 -> fd0bcc8a6


ScanningResultIterator metric RowsScanned not set. PHOENIX-5051


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fd0bcc8a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fd0bcc8a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fd0bcc8a

Branch: refs/heads/4.x-HBase-1.4
Commit: fd0bcc8a6db701f02e0f38ca07b2e4e7fc7be65b
Parents: 9d8bdd6
Author: chfeng 
Authored: Wed Dec 5 10:40:29 2018 +0800
Committer: Thomas D'Silva 
Committed: Wed Dec 5 10:05:22 2018 -0800

--
 .../java/org/apache/phoenix/iterate/ScanningResultIterator.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd0bcc8a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index f02e9d3..893eaa2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -117,7 +117,7 @@ public class ScanningResultIterator implements 
ResultIterator {
 scanMetricsMap.get(RPC_RETRIES_METRIC_NAME));
 changeMetric(scanMetricsHolder.getCountOfRemoteRPCRetries(),
 scanMetricsMap.get(REMOTE_RPC_RETRIES_METRIC_NAME));
-changeMetric(scanMetricsHolder.getCountOfRowsFiltered(),
+changeMetric(scanMetricsHolder.getCountOfRowsScanned(),
 scanMetricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME));
 changeMetric(scanMetricsHolder.getCountOfRowsFiltered(),
 
scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME));



phoenix git commit: ScanningResultIterator metric RowsScanned not set. PHOENIX-5051

2018-12-05 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 932dbe7aa -> 81c3f346a


ScanningResultIterator metric RowsScanned not set. PHOENIX-5051


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/81c3f346
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/81c3f346
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/81c3f346

Branch: refs/heads/4.x-HBase-1.2
Commit: 81c3f346a683700fef6dcb2d49486e58cb514c15
Parents: 932dbe7
Author: chfeng 
Authored: Wed Dec 5 10:40:29 2018 +0800
Committer: Thomas D'Silva 
Committed: Wed Dec 5 10:06:42 2018 -0800

--
 .../java/org/apache/phoenix/iterate/ScanningResultIterator.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/81c3f346/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index f02e9d3..893eaa2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -117,7 +117,7 @@ public class ScanningResultIterator implements 
ResultIterator {
 scanMetricsMap.get(RPC_RETRIES_METRIC_NAME));
 changeMetric(scanMetricsHolder.getCountOfRemoteRPCRetries(),
 scanMetricsMap.get(REMOTE_RPC_RETRIES_METRIC_NAME));
-changeMetric(scanMetricsHolder.getCountOfRowsFiltered(),
+changeMetric(scanMetricsHolder.getCountOfRowsScanned(),
 scanMetricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME));
 changeMetric(scanMetricsHolder.getCountOfRowsFiltered(),
 
scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME));



phoenix git commit: ScanningResultIterator metric RowsScanned not set. PHOENIX-5051

2018-12-05 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/master 1ab08c726 -> aa276bfd2


ScanningResultIterator metric RowsScanned not set. PHOENIX-5051


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/aa276bfd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/aa276bfd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/aa276bfd

Branch: refs/heads/master
Commit: aa276bfd2ef856f9da24c32710c616c9d195463b
Parents: 1ab08c7
Author: chfeng 
Authored: Wed Dec 5 10:40:29 2018 +0800
Committer: Thomas D'Silva 
Committed: Wed Dec 5 10:05:34 2018 -0800

--
 .../java/org/apache/phoenix/iterate/ScanningResultIterator.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/aa276bfd/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index f02e9d3..893eaa2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -117,7 +117,7 @@ public class ScanningResultIterator implements 
ResultIterator {
 scanMetricsMap.get(RPC_RETRIES_METRIC_NAME));
 changeMetric(scanMetricsHolder.getCountOfRemoteRPCRetries(),
 scanMetricsMap.get(REMOTE_RPC_RETRIES_METRIC_NAME));
-changeMetric(scanMetricsHolder.getCountOfRowsFiltered(),
+changeMetric(scanMetricsHolder.getCountOfRowsScanned(),
 scanMetricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME));
 changeMetric(scanMetricsHolder.getCountOfRowsFiltered(),
 
scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME));



phoenix git commit: PHOENIX-4765 Add client and server side config property to enable rollback of splittable System Catalog if required (addendum)

2018-11-27 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/master 0a84ad6c1 -> 70d5cd9e3


PHOENIX-4765 Add client and server side config property to enable rollback of 
splittable System Catalog if required (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/70d5cd9e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/70d5cd9e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/70d5cd9e

Branch: refs/heads/master
Commit: 70d5cd9e348dcc31eeea93cc9452527666d9b6d2
Parents: 0a84ad6
Author: Thomas D'Silva 
Authored: Tue Nov 27 13:46:19 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 27 13:47:23 2018 -0800

--
 .../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java | 2 ++
 .../src/main/java/org/apache/phoenix/query/QueryServices.java | 3 +++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/70d5cd9e/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 5d2fb54..8790819 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2692,6 +2692,8 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements RegionCopr
 
 private MetaDataResponse processRemoteRegionMutations(byte[] 
systemTableName,
 List<Mutation> remoteMutations, MetaDataProtos.MutationCode 
mutationCode) throws IOException {
+if (remoteMutations.isEmpty())
+return null;
 MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
 try (Table hTable =
 ServerUtil.getHTableForCoprocessorScan(env,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/70d5cd9e/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 1c17da9..8e17749 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -348,6 +348,9 @@ public interface QueryServices extends SQLCloseable {
 // feature
 //
 // By default this config is false meaning that rolling back the upgrade 
is not possible
+// If this config is true and you want to rollback the upgrade be sure to 
run the sql commands in
+// UpgradeUtil.addParentToChildLink which will recreate the PARENT->CHILD 
links in SYSTEM.CATALOG. This is needed
+// as from 4.15 onwards the PARENT->CHILD links are stored in a separate 
SYSTEM.CHILD_LINK table.
 public static final String ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK =
 "phoenix.allow.system.catalog.rollback";
 



phoenix git commit: PHOENIX-4765 Add client and server side config property to enable rollback of splittable System Catalog if required (addendum)

2018-11-27 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 bb67a6534 -> 1813af615


PHOENIX-4765 Add client and server side config property to enable rollback of 
splittable System Catalog if required (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1813af61
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1813af61
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1813af61

Branch: refs/heads/4.x-HBase-1.3
Commit: 1813af61598ba2dba3cbada7272ced16836ff77d
Parents: bb67a65
Author: Thomas D'Silva 
Authored: Tue Nov 27 13:46:19 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 27 13:47:11 2018 -0800

--
 .../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java | 2 ++
 .../src/main/java/org/apache/phoenix/query/QueryServices.java | 3 +++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1813af61/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 14caca3..d138132 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2678,6 +2678,8 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 
 private MetaDataResponse processRemoteRegionMutations(byte[] systemTableName,
 List<Mutation> remoteMutations, MetaDataProtos.MutationCode mutationCode) throws IOException {
+if (remoteMutations.isEmpty())
+return null;
 MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
 try (Table hTable =
 env.getTable(

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1813af61/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 728f3f8..becd116 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -345,6 +345,9 @@ public interface QueryServices extends SQLCloseable {
 // feature
 //
 // By default this config is false meaning that rolling back the upgrade 
is not possible
+// If this config is true and you want to rollback the upgrade be sure to 
run the sql commands in
+// UpgradeUtil.addParentToChildLink which will recreate the PARENT->CHILD 
links in SYSTEM.CATALOG. This is needed
+// as from 4.15 onwards the PARENT->CHILD links are stored in a separate 
SYSTEM.CHILD_LINK table.
 public static final String ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK =
 "phoenix.allow.system.catalog.rollback";
 



phoenix git commit: PHOENIX-4765 Add client and server side config property to enable rollback of splittable System Catalog if required (addendum)

2018-11-27 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 3ca3552d7 -> d0a115ce0


PHOENIX-4765 Add client and server side config property to enable rollback of 
splittable System Catalog if required (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d0a115ce
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d0a115ce
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d0a115ce

Branch: refs/heads/4.x-HBase-1.4
Commit: d0a115ce05909180e99515d37ecf7689a8505611
Parents: 3ca3552
Author: Thomas D'Silva 
Authored: Tue Nov 27 13:46:19 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 27 13:47:17 2018 -0800

--
 .../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java | 2 ++
 .../src/main/java/org/apache/phoenix/query/QueryServices.java | 3 +++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d0a115ce/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 14caca3..d138132 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2678,6 +2678,8 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 
 private MetaDataResponse processRemoteRegionMutations(byte[] systemTableName,
 List<Mutation> remoteMutations, MetaDataProtos.MutationCode mutationCode) throws IOException {
+if (remoteMutations.isEmpty())
+return null;
 MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
 try (Table hTable =
 env.getTable(

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d0a115ce/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 1c17da9..8e17749 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -348,6 +348,9 @@ public interface QueryServices extends SQLCloseable {
 // feature
 //
 // By default this config is false meaning that rolling back the upgrade 
is not possible
+// If this config is true and you want to rollback the upgrade be sure to 
run the sql commands in
+// UpgradeUtil.addParentToChildLink which will recreate the PARENT->CHILD 
links in SYSTEM.CATALOG. This is needed
+// as from 4.15 onwards the PARENT->CHILD links are stored in a separate 
SYSTEM.CHILD_LINK table.
 public static final String ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK =
 "phoenix.allow.system.catalog.rollback";
 



phoenix git commit: PHOENIX-4765 Add client and server side config property to enable rollback of splittable System Catalog if required (addendum)

2018-11-27 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 c86b3e42e -> 93e284647


PHOENIX-4765 Add client and server side config property to enable rollback of 
splittable System Catalog if required (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/93e28464
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/93e28464
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/93e28464

Branch: refs/heads/4.x-HBase-1.2
Commit: 93e28464780a18ac26793c71188b5ebcbaee2011
Parents: c86b3e4
Author: Thomas D'Silva 
Authored: Tue Nov 27 13:46:19 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 27 13:46:19 2018 -0800

--
 .../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java | 2 ++
 .../src/main/java/org/apache/phoenix/query/QueryServices.java | 3 +++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/93e28464/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 14caca3..d138132 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2678,6 +2678,8 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 
 private MetaDataResponse processRemoteRegionMutations(byte[] systemTableName,
 List<Mutation> remoteMutations, MetaDataProtos.MutationCode mutationCode) throws IOException {
+if (remoteMutations.isEmpty())
+return null;
 MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
 try (Table hTable =
 env.getTable(

http://git-wip-us.apache.org/repos/asf/phoenix/blob/93e28464/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 728f3f8..becd116 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -345,6 +345,9 @@ public interface QueryServices extends SQLCloseable {
 // feature
 //
 // By default this config is false meaning that rolling back the upgrade 
is not possible
+// If this config is true and you want to rollback the upgrade be sure to 
run the sql commands in
+// UpgradeUtil.addParentToChildLink which will recreate the PARENT->CHILD 
links in SYSTEM.CATALOG. This is needed
+// as from 4.15 onwards the PARENT->CHILD links are stored in a separate 
SYSTEM.CHILD_LINK table.
 public static final String ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK =
 "phoenix.allow.system.catalog.rollback";
 



phoenix git commit: PHOENIX-4765 Add client and server side config property to enable rollback of splittable System Catalog if required

2018-11-26 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/master 12c8f1b1f -> 0a84ad6c1


PHOENIX-4765 Add client and server side config property to enable rollback of 
splittable System Catalog if required


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0a84ad6c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0a84ad6c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0a84ad6c

Branch: refs/heads/master
Commit: 0a84ad6c155b49530cf6eca9af6ed95b51ff96c2
Parents: 12c8f1b
Author: Thomas D'Silva 
Authored: Tue Nov 20 12:10:05 2018 -0800
Committer: Thomas D'Silva 
Committed: Mon Nov 26 16:36:05 2018 -0800

--
 .../apache/phoenix/end2end/SystemCatalogIT.java | 40 -
 .../coprocessor/MetaDataEndpointImpl.java   | 90 ++--
 .../phoenix/coprocessor/MetaDataProtocol.java   |  5 +-
 .../org/apache/phoenix/query/QueryServices.java | 17 
 .../phoenix/query/QueryServicesOptions.java |  2 +
 .../apache/phoenix/schema/MetaDataClient.java   | 26 +-
 6 files changed, 146 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0a84ad6c/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
index 3133b56..cf7dcc2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -31,10 +32,12 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -44,11 +47,12 @@ import com.google.common.collect.Maps;
 @Category(NeedsOwnMiniClusterTest.class)
 public class SystemCatalogIT extends BaseTest {
 private HBaseTestingUtility testUtil = null;
-
+
@BeforeClass
public static void doSetup() throws Exception {
Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(1);
serverProps.put(QueryServices.SYSTEM_CATALOG_SPLITTABLE, "false");
+serverProps.put(QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK, "true");
Map<String, String> clientProps = Collections.emptyMap();
setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
new ReadOnlyProps(clientProps.entrySet().iterator()));
@@ -87,7 +91,8 @@ public class SystemCatalogIT extends BaseTest {
 Statement stmt = conn.createStatement();) {
 stmt.execute("DROP TABLE IF EXISTS " + tableName);
 stmt.execute("CREATE TABLE " + tableName
-+ " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 
VARCHAR CONSTRAINT PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT=true");
++ " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 
VARCHAR CONSTRAINT PK " +
+"PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT=true");
 try (Connection tenant1Conn = getTenantConnection("tenant1")) {
 String view1DDL = "CREATE VIEW " + tableName + "_view AS 
SELECT * FROM " + tableName;
 tenant1Conn.createStatement().execute(view1DDL);
@@ -97,7 +102,7 @@ public class SystemCatalogIT extends BaseTest {
 }
 
 private String getJdbcUrl() {
-return "jdbc:phoenix:localhost:" + 
testUtil.getZkCluster().getClientPort() + ":/hbase";
+return "jdbc:phoenix:localhost:" + 
getUtility().getZkCluster().getClientPort() + ":/hbase";
 }
 
 private Connection getTenantConnection(String tenantId) throws 
SQLException {
@@ -105,4 +110,31 @@ public class SystemCatalogIT extends BaseTest {
 tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
 return DriverManager.getConnection(getJdbcUrl(), tenantProps);
 }
-}
\ No newline at end of file
+
+/**
+ * Ensure that we cannot add a column to a base table if QueryServices.BLOCK_METADATA_CHANGES_REQUIRE_PROPAGATION
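The getJdbcUrl/getTenantConnection helpers above show the standard pattern for opening a tenant-scoped Phoenix connection; a self-contained sketch (host, port, and tenant id are placeholders):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;
    import org.apache.phoenix.util.PhoenixRuntime;

    Properties tenantProps = new Properties();
    tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "tenant1");
    try (Connection conn = DriverManager.getConnection(
            "jdbc:phoenix:localhost:2181:/hbase", tenantProps)) {
        // statements here see tenant1's slice of MULTI_TENANT tables
    }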

phoenix git commit: PHOENIX-4765 Add client and server side config property to enable rollback of splittable System Catalog if required

2018-11-26 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 d54ae1909 -> bb67a6534


PHOENIX-4765 Add client and server side config property to enable rollback of 
splittable System Catalog if required


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bb67a653
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bb67a653
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bb67a653

Branch: refs/heads/4.x-HBase-1.3
Commit: bb67a653423e804a9f0b4b7e11d114f922427e5e
Parents: d54ae19
Author: Thomas D'Silva 
Authored: Tue Nov 20 12:10:05 2018 -0800
Committer: Thomas D'Silva 
Committed: Mon Nov 26 16:35:56 2018 -0800

--
 .../apache/phoenix/end2end/SystemCatalogIT.java | 40 -
 .../coprocessor/MetaDataEndpointImpl.java   | 90 ++--
 .../phoenix/coprocessor/MetaDataProtocol.java   |  5 +-
 .../org/apache/phoenix/query/QueryServices.java | 17 
 .../phoenix/query/QueryServicesOptions.java |  2 +
 .../apache/phoenix/schema/MetaDataClient.java   | 26 +-
 6 files changed, 146 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bb67a653/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
index 8a41fad..dd3721f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -31,10 +32,12 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -44,11 +47,12 @@ import com.google.common.collect.Maps;
 @Category(NeedsOwnMiniClusterTest.class)
 public class SystemCatalogIT extends BaseTest {
 private HBaseTestingUtility testUtil = null;
-
+
@BeforeClass
public static void doSetup() throws Exception {
Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(1);
serverProps.put(QueryServices.SYSTEM_CATALOG_SPLITTABLE, "false");
+serverProps.put(QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK, "true");
Map<String, String> clientProps = Collections.emptyMap();
setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
new ReadOnlyProps(clientProps.entrySet().iterator()));
@@ -87,7 +91,8 @@ public class SystemCatalogIT extends BaseTest {
 Statement stmt = conn.createStatement();) {
 stmt.execute("DROP TABLE IF EXISTS " + tableName);
 stmt.execute("CREATE TABLE " + tableName
-+ " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 
VARCHAR CONSTRAINT PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT=true");
++ " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 
VARCHAR CONSTRAINT PK " +
+"PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT=true");
 try (Connection tenant1Conn = getTenantConnection("tenant1")) {
 String view1DDL = "CREATE VIEW " + tableName + "_view AS 
SELECT * FROM " + tableName;
 tenant1Conn.createStatement().execute(view1DDL);
@@ -97,7 +102,7 @@ public class SystemCatalogIT extends BaseTest {
 }
 
 private String getJdbcUrl() {
-return "jdbc:phoenix:localhost:" + 
testUtil.getZkCluster().getClientPort() + ":/hbase";
+return "jdbc:phoenix:localhost:" + 
getUtility().getZkCluster().getClientPort() + ":/hbase";
 }
 
 private Connection getTenantConnection(String tenantId) throws 
SQLException {
@@ -105,4 +110,31 @@ public class SystemCatalogIT extends BaseTest {
 tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
 return DriverManager.getConnection(getJdbcUrl(), tenantProps);
 }
-}
+
+/**
+ * Ensure that we cannot add a column to a base table if QueryServices.BLOCK_METADATA_CHANGES_REQUIRE_PROPAGATION
+  

phoenix git commit: PHOENIX-4765 Add client and server side config property to enable rollback of splittable System Catalog if required

2018-11-26 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 5997a567e -> 3ca3552d7


PHOENIX-4765 Add client and server side config property to enable rollback of 
splittable System Catalog if required


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3ca3552d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3ca3552d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3ca3552d

Branch: refs/heads/4.x-HBase-1.4
Commit: 3ca3552d7fdd152dc1d904c0c353b410d1170d65
Parents: 5997a56
Author: Thomas D'Silva 
Authored: Tue Nov 20 12:10:05 2018 -0800
Committer: Thomas D'Silva 
Committed: Mon Nov 26 16:35:59 2018 -0800

--
 .../apache/phoenix/end2end/SystemCatalogIT.java | 40 -
 .../coprocessor/MetaDataEndpointImpl.java   | 90 ++--
 .../phoenix/coprocessor/MetaDataProtocol.java   |  5 +-
 .../org/apache/phoenix/query/QueryServices.java | 17 
 .../phoenix/query/QueryServicesOptions.java |  2 +
 .../apache/phoenix/schema/MetaDataClient.java   | 26 +-
 6 files changed, 146 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3ca3552d/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
index 8a41fad..dd3721f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -31,10 +32,12 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -44,11 +47,12 @@ import com.google.common.collect.Maps;
 @Category(NeedsOwnMiniClusterTest.class)
 public class SystemCatalogIT extends BaseTest {
 private HBaseTestingUtility testUtil = null;
-
+
@BeforeClass
public static void doSetup() throws Exception {
Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(1);
serverProps.put(QueryServices.SYSTEM_CATALOG_SPLITTABLE, "false");
+serverProps.put(QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK, "true");
Map<String, String> clientProps = Collections.emptyMap();
setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
new ReadOnlyProps(clientProps.entrySet().iterator()));
@@ -87,7 +91,8 @@ public class SystemCatalogIT extends BaseTest {
 Statement stmt = conn.createStatement();) {
 stmt.execute("DROP TABLE IF EXISTS " + tableName);
 stmt.execute("CREATE TABLE " + tableName
-+ " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 
VARCHAR CONSTRAINT PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT=true");
++ " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 
VARCHAR CONSTRAINT PK " +
+"PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT=true");
 try (Connection tenant1Conn = getTenantConnection("tenant1")) {
 String view1DDL = "CREATE VIEW " + tableName + "_view AS 
SELECT * FROM " + tableName;
 tenant1Conn.createStatement().execute(view1DDL);
@@ -97,7 +102,7 @@ public class SystemCatalogIT extends BaseTest {
 }
 
 private String getJdbcUrl() {
-return "jdbc:phoenix:localhost:" + 
testUtil.getZkCluster().getClientPort() + ":/hbase";
+return "jdbc:phoenix:localhost:" + 
getUtility().getZkCluster().getClientPort() + ":/hbase";
 }
 
 private Connection getTenantConnection(String tenantId) throws 
SQLException {
@@ -105,4 +110,31 @@ public class SystemCatalogIT extends BaseTest {
 tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
 return DriverManager.getConnection(getJdbcUrl(), tenantProps);
 }
-}
+
+/**
+ * Ensure that we cannot add a column to a base table if QueryServices.BLOCK_METADATA_CHANGES_REQUIRE_PROPAGATION
+  

phoenix git commit: PHOENIX-4765 Add client and server side config property to enable rollback of splittable System Catalog if required

2018-11-26 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 f9ee6ee78 -> c86b3e42e


PHOENIX-4765 Add client and server side config property to enable rollback of 
splittable System Catalog if required


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c86b3e42
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c86b3e42
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c86b3e42

Branch: refs/heads/4.x-HBase-1.2
Commit: c86b3e42ee8c7450ff708268bf3aaa4d7eede794
Parents: f9ee6ee
Author: Thomas D'Silva 
Authored: Tue Nov 20 12:10:05 2018 -0800
Committer: Thomas D'Silva 
Committed: Mon Nov 26 16:35:47 2018 -0800

--
 .../apache/phoenix/end2end/SystemCatalogIT.java | 40 -
 .../coprocessor/MetaDataEndpointImpl.java   | 90 ++--
 .../phoenix/coprocessor/MetaDataProtocol.java   |  5 +-
 .../org/apache/phoenix/query/QueryServices.java | 17 
 .../phoenix/query/QueryServicesOptions.java |  2 +
 .../apache/phoenix/schema/MetaDataClient.java   | 26 +-
 6 files changed, 146 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c86b3e42/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
index 8a41fad..dd3721f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -31,10 +32,12 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -44,11 +47,12 @@ import com.google.common.collect.Maps;
 @Category(NeedsOwnMiniClusterTest.class)
 public class SystemCatalogIT extends BaseTest {
 private HBaseTestingUtility testUtil = null;
-
+
@BeforeClass
public static void doSetup() throws Exception {
Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(1);
serverProps.put(QueryServices.SYSTEM_CATALOG_SPLITTABLE, "false");
+serverProps.put(QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK, "true");
Map<String, String> clientProps = Collections.emptyMap();
setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
new ReadOnlyProps(clientProps.entrySet().iterator()));
@@ -87,7 +91,8 @@ public class SystemCatalogIT extends BaseTest {
 Statement stmt = conn.createStatement();) {
 stmt.execute("DROP TABLE IF EXISTS " + tableName);
 stmt.execute("CREATE TABLE " + tableName
-+ " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 
VARCHAR CONSTRAINT PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT=true");
++ " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 
VARCHAR CONSTRAINT PK " +
+"PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT=true");
 try (Connection tenant1Conn = getTenantConnection("tenant1")) {
 String view1DDL = "CREATE VIEW " + tableName + "_view AS 
SELECT * FROM " + tableName;
 tenant1Conn.createStatement().execute(view1DDL);
@@ -97,7 +102,7 @@ public class SystemCatalogIT extends BaseTest {
 }
 
 private String getJdbcUrl() {
-return "jdbc:phoenix:localhost:" + 
testUtil.getZkCluster().getClientPort() + ":/hbase";
+return "jdbc:phoenix:localhost:" + 
getUtility().getZkCluster().getClientPort() + ":/hbase";
 }
 
 private Connection getTenantConnection(String tenantId) throws 
SQLException {
@@ -105,4 +110,31 @@ public class SystemCatalogIT extends BaseTest {
 tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
 return DriverManager.getConnection(getJdbcUrl(), tenantProps);
 }
-}
+
+/**
+ * Ensure that we cannot add a column to a base table if QueryServices.BLOCK_METADATA_CHANGES_REQUIRE_PROPAGATION
+  

phoenix git commit: PHOENIX-5031 Fix TenantSpecificViewIndexIT test failures in HBase 1.2 branch

2018-11-26 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 7b1ee3ce7 -> f9ee6ee78


PHOENIX-5031 Fix TenantSpecificViewIndexIT test failures in HBase 1.2 branch


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f9ee6ee7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f9ee6ee7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f9ee6ee7

Branch: refs/heads/4.x-HBase-1.2
Commit: f9ee6ee78deb33a20753b27528cfe71c022278b9
Parents: 7b1ee3c
Author: Thomas D'Silva 
Authored: Mon Nov 26 14:48:10 2018 -0800
Committer: Thomas D'Silva 
Committed: Mon Nov 26 14:48:10 2018 -0800

--
 .../org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f9ee6ee7/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
index ea8f004..a317693 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
@@ -130,8 +130,8 @@ public class TenantSpecificViewIndexIT extends 
BaseTenantSpecificViewIndexIT {
 String sequenceNameA = 
getViewIndexSequenceName(PNameFactory.newName(tableName), 
PNameFactory.newName(tenantId2), isNamespaceEnabled);
 String sequenceNameB = 
getViewIndexSequenceName(PNameFactory.newName(tableName), 
PNameFactory.newName(tenantId1), isNamespaceEnabled);
 String sequenceSchemaName = 
getViewIndexSequenceSchemaName(PNameFactory.newName(tableName), 
isNamespaceEnabled);
-verifySequenceValue(isNamespaceEnabled? tenantId2 : null, sequenceNameA, sequenceSchemaName, -32767);
-verifySequenceValue(isNamespaceEnabled? tenantId1 : null, sequenceNameB, sequenceSchemaName, -32767);
+verifySequenceValue(isNamespaceEnabled? tenantId2 : null, sequenceNameA, sequenceSchemaName, -9223372036854775807L);
+verifySequenceValue(isNamespaceEnabled? tenantId1 : null, sequenceNameB, sequenceSchemaName, -9223372036854775807L);
 
 Properties props = new Properties();
 props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId2);
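The expected initial sequence value changes from -32767, which is Short.MIN_VALUE + 1, to -9223372036854775807L, which is Long.MIN_VALUE + 1: the view-index sequences now start at the bottom of the long range instead of the short range. As a quick check:

    // Long.MIN_VALUE == -9223372036854775808L, so:
    long expectedStart = Long.MIN_VALUE + 1;  // -9223372036854775807L
    short oldStart = Short.MIN_VALUE + 1;     // -32767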



phoenix git commit: PHOENIX-4971 Drop index will execute successfully using Incorrect name of parent tables

2018-11-26 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 2e4cd0ddb -> 5997a567e


PHOENIX-4971 Drop index will execute successfully using Incorrect name of 
parent tables


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5997a567
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5997a567
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5997a567

Branch: refs/heads/4.x-HBase-1.4
Commit: 5997a567ef05d3dc25d9452b1f0e75dda830b529
Parents: 2e4cd0d
Author: Jaanai 
Authored: Sun Nov 25 01:22:49 2018 +0800
Committer: Thomas D'Silva 
Committed: Mon Nov 26 10:21:27 2018 -0800

--
 .../java/org/apache/phoenix/end2end/ViewIT.java | 76 ++--
 .../phoenix/end2end/index/DropMetadataIT.java   | 23 +-
 .../phoenix/end2end/index/IndexMetadataIT.java  |  5 +-
 .../coprocessor/MetaDataEndpointImpl.java   |  2 +-
 .../phoenix/exception/SQLExceptionCode.java |  2 +
 .../apache/phoenix/schema/MetaDataClient.java   | 16 +
 6 files changed, 83 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5997a567/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 090ccaa..6318dca 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -908,60 +909,61 @@ public class ViewIT extends SplitSystemCatalogIT {
 props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, 
Boolean.TRUE.toString());
 
 try (Connection conn = DriverManager.getConnection(getUrl(), props);
-HBaseAdmin admin =
-
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+Admin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
 
 conn.createStatement().execute("CREATE SCHEMA " + NS);
 
 // test for a view that is in non-default schema
-HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(NS, 
TBL));
-desc.addFamily(new HColumnDescriptor(CF));
-admin.createTable(desc);
+{
+HTableDescriptor desc = new 
HTableDescriptor(TableName.valueOf(NS, TBL));
+desc.addFamily(new HColumnDescriptor(CF));
+admin.createTable(desc);
 
-String view1 = NS + "." + TBL;
-conn.createStatement().execute(
-"CREATE VIEW " + view1 + " (PK VARCHAR PRIMARY KEY, " + CF + 
".COL VARCHAR)");
+String view1 = NS + "." + TBL;
+conn.createStatement().execute(
+"CREATE VIEW " + view1 + " (PK VARCHAR PRIMARY KEY, " 
+ CF + ".COL VARCHAR)");
 
-assertTrue(QueryUtil
-.getExplainPlan(
+assertTrue(QueryUtil.getExplainPlan(
 conn.createStatement().executeQuery("explain select * 
from " + view1))
-.contains(NS + ":" + TBL));
+.contains(NS + ":" + TBL));
 
-
+conn.createStatement().execute("DROP VIEW " + view1);
+}
+
+// test for a view whose name contains a dot (e.g. "AAA.BBB") in 
default schema (for backward compatibility)
+{
+HTableDescriptor desc = new 
HTableDescriptor(TableName.valueOf(NS + "." + TBL));
+desc.addFamily(new HColumnDescriptor(CF));
+admin.createTable(desc);
 
-// test for a view whose name contains a dot (e.g. "AAA.BBB") in 
default schema (for
-// backward compatibility)
-desc = new HTableDescriptor(TableName.valueOf(NS + "." + TBL));
-desc.addFamily(new HColumnDescriptor(CF));
-admin.createTable(desc);
+String view2 = "\"" + NS + "." + TBL + "\"";
+conn.createStatement().execute(
+"CREATE VIEW " + view2 + " (PK VARCHAR PRIMARY KEY, " 
+ CF + ".COL VARCHAR)");
 
-String view2 = "\"" + NS + "." + TBL + "\"";
-conn.createStatement().execute(
-"CREATE VIEW "

phoenix git commit: PHOENIX-4971 Drop index will execute successfully using Incorrect name of parent tables

2018-11-26 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 3508086ce -> 7b1ee3ce7


PHOENIX-4971 Drop index will execute successfully using Incorrect name of 
parent tables


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7b1ee3ce
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7b1ee3ce
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7b1ee3ce

Branch: refs/heads/4.x-HBase-1.2
Commit: 7b1ee3ce70830f369ce9058f2c1981e7747b87d0
Parents: 3508086
Author: Jaanai 
Authored: Sun Nov 25 01:22:49 2018 +0800
Committer: Thomas D'Silva 
Committed: Mon Nov 26 10:21:01 2018 -0800

--
 .../java/org/apache/phoenix/end2end/ViewIT.java | 76 ++--
 .../phoenix/end2end/index/DropMetadataIT.java   | 23 +-
 .../phoenix/end2end/index/IndexMetadataIT.java  |  5 +-
 .../coprocessor/MetaDataEndpointImpl.java   |  2 +-
 .../phoenix/exception/SQLExceptionCode.java |  2 +
 .../apache/phoenix/schema/MetaDataClient.java   | 16 +
 6 files changed, 83 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7b1ee3ce/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 090ccaa..6318dca 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -908,60 +909,61 @@ public class ViewIT extends SplitSystemCatalogIT {
 props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, 
Boolean.TRUE.toString());
 
 try (Connection conn = DriverManager.getConnection(getUrl(), props);
-HBaseAdmin admin =
-
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+Admin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
 
 conn.createStatement().execute("CREATE SCHEMA " + NS);
 
 // test for a view that is in non-default schema
-HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(NS, 
TBL));
-desc.addFamily(new HColumnDescriptor(CF));
-admin.createTable(desc);
+{
+HTableDescriptor desc = new 
HTableDescriptor(TableName.valueOf(NS, TBL));
+desc.addFamily(new HColumnDescriptor(CF));
+admin.createTable(desc);
 
-String view1 = NS + "." + TBL;
-conn.createStatement().execute(
-"CREATE VIEW " + view1 + " (PK VARCHAR PRIMARY KEY, " + CF + 
".COL VARCHAR)");
+String view1 = NS + "." + TBL;
+conn.createStatement().execute(
+"CREATE VIEW " + view1 + " (PK VARCHAR PRIMARY KEY, " 
+ CF + ".COL VARCHAR)");
 
-assertTrue(QueryUtil
-.getExplainPlan(
+assertTrue(QueryUtil.getExplainPlan(
 conn.createStatement().executeQuery("explain select * 
from " + view1))
-.contains(NS + ":" + TBL));
+.contains(NS + ":" + TBL));
 
-
+conn.createStatement().execute("DROP VIEW " + view1);
+}
+
+// test for a view whose name contains a dot (e.g. "AAA.BBB") in 
default schema (for backward compatibility)
+{
+HTableDescriptor desc = new 
HTableDescriptor(TableName.valueOf(NS + "." + TBL));
+desc.addFamily(new HColumnDescriptor(CF));
+admin.createTable(desc);
 
-// test for a view whose name contains a dot (e.g. "AAA.BBB") in 
default schema (for
-// backward compatibility)
-desc = new HTableDescriptor(TableName.valueOf(NS + "." + TBL));
-desc.addFamily(new HColumnDescriptor(CF));
-admin.createTable(desc);
+String view2 = "\"" + NS + "." + TBL + "\"";
+conn.createStatement().execute(
+"CREATE VIEW " + view2 + " (PK VARCHAR PRIMARY KEY, " 
+ CF + ".COL VARCHAR)");
 
-String view2 = "\"" + NS + "." + TBL + "\"";
-conn.createStatement().execute(
-"CREATE VIEW "

phoenix git commit: PHOENIX-4971 Drop index will execute successfully using Incorrect name of parent tables

2018-11-26 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/master d821ea30c -> 12c8f1b1f


PHOENIX-4971 Drop index will execute successfully using Incorrect name of 
parent tables


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/12c8f1b1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/12c8f1b1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/12c8f1b1

Branch: refs/heads/master
Commit: 12c8f1b1f67e56cd4f3f2a0dde9035a5b72b9de9
Parents: d821ea3
Author: Jaanai 
Authored: Fri Nov 23 21:16:20 2018 +0800
Committer: Thomas D'Silva 
Committed: Mon Nov 26 10:22:05 2018 -0800

--
 .../java/org/apache/phoenix/end2end/ViewIT.java | 51 +---
 .../phoenix/end2end/index/DropMetadataIT.java   | 23 -
 .../phoenix/end2end/index/IndexMetadataIT.java  |  5 +-
 .../coprocessor/MetaDataEndpointImpl.java   |  2 +-
 .../phoenix/exception/SQLExceptionCode.java |  2 +
 .../apache/phoenix/schema/MetaDataClient.java   | 16 ++
 6 files changed, 68 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/12c8f1b1/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 572846e..bcf7eca 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -927,55 +927,52 @@ public class ViewIT extends SplitSystemCatalogIT {
 TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(NS, TBL));
 builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(CF));
 admin.createTable(builder.build());
-}
 
-String view1 = NS + "." + TBL;
-conn.createStatement().execute(
-"CREATE VIEW " + view1 + " (PK VARCHAR PRIMARY KEY, " + CF + 
".COL VARCHAR)");
+String view1 = NS + "." + TBL;
+conn.createStatement().execute(
+"CREATE VIEW " + view1 + " (PK VARCHAR PRIMARY KEY, " 
+ CF + ".COL VARCHAR)");
 
-assertTrue(QueryUtil
-.getExplainPlan(
-conn.createStatement().executeQuery("explain select * 
from " + view1))
+assertTrue(QueryUtil.getExplainPlan(
+conn.createStatement().executeQuery("explain select * from 
" + view1))
 .contains(NS + ":" + TBL));
 
-
+conn.createStatement().execute("DROP VIEW " + view1);
+}
 
 // test for a view whose name contains a dot (e.g. "AAA.BBB") in 
default schema (for backward compatibility)
 {
 TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(NS + "." + TBL));
 builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(CF));
 admin.createTable(builder.build());
-}
 
-String view2 = "\"" + NS + "." + TBL + "\"";
-conn.createStatement().execute(
-"CREATE VIEW " + view2 + " (PK VARCHAR PRIMARY KEY, " + CF + 
".COL VARCHAR)");
+String view2 = "\"" + NS + "." + TBL + "\"";
+conn.createStatement().execute(
+"CREATE VIEW " + view2 + " (PK VARCHAR PRIMARY KEY, " + CF 
+ ".COL VARCHAR)");
 
-assertTrue(QueryUtil
-.getExplainPlan(
-conn.createStatement().executeQuery("explain select * 
from " + view2))
-.contains(NS + "." + TBL));
+assertTrue(QueryUtil
+.getExplainPlan(
+conn.createStatement().executeQuery("explain 
select * from " + view2))
+.contains(NS + "." + TBL));
+
+conn.createStatement().execute("DROP VIEW " + view2);
+}
 
 // test for a view whose name contains a dot (e.g. "AAA.BBB") in 
non-default schema
 {
 TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(NS, NS + "." + TBL));
 builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(CF));
 admin.createTable(builder.build());
-}
 
-String view3 = NS + ".\"" + NS + "." + TBL + "\"";
-conn.createStatement().execute(
-"CREATE VIEW " + view3 + " (PK VARCHAR PRIMARY KEY, " + CF + 
".COL VARCHAR)");
+String view3 = NS + ".\"" + NS + "." + TBL + "\"";
+conn.createStatement().

phoenix git commit: PHOENIX-4971 Drop index will execute successfully using Incorrect name of parent tables

2018-11-26 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 8a7295dc4 -> d54ae1909


PHOENIX-4971 Drop index will execute successfully using Incorrect name of 
parent tables


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d54ae190
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d54ae190
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d54ae190

Branch: refs/heads/4.x-HBase-1.3
Commit: d54ae1909923e23f76bc236ad4413ee7c30983e6
Parents: 8a7295d
Author: Jaanai 
Authored: Sun Nov 25 01:22:49 2018 +0800
Committer: Thomas D'Silva 
Committed: Mon Nov 26 10:21:17 2018 -0800

--
 .../java/org/apache/phoenix/end2end/ViewIT.java | 76 ++--
 .../phoenix/end2end/index/DropMetadataIT.java   | 23 +-
 .../phoenix/end2end/index/IndexMetadataIT.java  |  5 +-
 .../coprocessor/MetaDataEndpointImpl.java   |  2 +-
 .../phoenix/exception/SQLExceptionCode.java |  2 +
 .../apache/phoenix/schema/MetaDataClient.java   | 16 +
 6 files changed, 83 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d54ae190/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 090ccaa..6318dca 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -908,60 +909,61 @@ public class ViewIT extends SplitSystemCatalogIT {
 props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, 
Boolean.TRUE.toString());
 
 try (Connection conn = DriverManager.getConnection(getUrl(), props);
-HBaseAdmin admin =
-
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+Admin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
 
 conn.createStatement().execute("CREATE SCHEMA " + NS);
 
 // test for a view that is in non-default schema
-HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(NS, 
TBL));
-desc.addFamily(new HColumnDescriptor(CF));
-admin.createTable(desc);
+{
+HTableDescriptor desc = new 
HTableDescriptor(TableName.valueOf(NS, TBL));
+desc.addFamily(new HColumnDescriptor(CF));
+admin.createTable(desc);
 
-String view1 = NS + "." + TBL;
-conn.createStatement().execute(
-"CREATE VIEW " + view1 + " (PK VARCHAR PRIMARY KEY, " + CF + 
".COL VARCHAR)");
+String view1 = NS + "." + TBL;
+conn.createStatement().execute(
+"CREATE VIEW " + view1 + " (PK VARCHAR PRIMARY KEY, " 
+ CF + ".COL VARCHAR)");
 
-assertTrue(QueryUtil
-.getExplainPlan(
+assertTrue(QueryUtil.getExplainPlan(
 conn.createStatement().executeQuery("explain select * 
from " + view1))
-.contains(NS + ":" + TBL));
+.contains(NS + ":" + TBL));
 
-
+conn.createStatement().execute("DROP VIEW " + view1);
+}
+
+// test for a view whose name contains a dot (e.g. "AAA.BBB") in 
default schema (for backward compatibility)
+{
+HTableDescriptor desc = new 
HTableDescriptor(TableName.valueOf(NS + "." + TBL));
+desc.addFamily(new HColumnDescriptor(CF));
+admin.createTable(desc);
 
-// test for a view whose name contains a dot (e.g. "AAA.BBB") in 
default schema (for
-// backward compatibility)
-desc = new HTableDescriptor(TableName.valueOf(NS + "." + TBL));
-desc.addFamily(new HColumnDescriptor(CF));
-admin.createTable(desc);
+String view2 = "\"" + NS + "." + TBL + "\"";
+conn.createStatement().execute(
+"CREATE VIEW " + view2 + " (PK VARCHAR PRIMARY KEY, " 
+ CF + ".COL VARCHAR)");
 
-String view2 = "\"" + NS + "." + TBL + "\"";
-conn.createStatement().execute(
-"CREATE VIEW "

[1/2] phoenix git commit: PHOENIX-5021 Remove SplitIT from the 4.x-HBase-1.4 and master branch

2018-11-15 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/master cd31ed5e8 -> 9e7da9a17


PHOENIX-5021 Remove SplitIT from the 4.x-HBase-1.4 and master branch


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/316014f9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/316014f9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/316014f9

Branch: refs/heads/master
Commit: 316014f9b79c3ef8ca1571cd224041e76af84845
Parents: cd31ed5
Author: Thomas D'Silva 
Authored: Thu Nov 15 19:06:02 2018 -0800
Committer: Thomas D'Silva 
Committed: Thu Nov 15 19:12:08 2018 -0800

--
 .../org/apache/phoenix/end2end/SplitIT.java | 248 ---
 1 file changed, 248 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/316014f9/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java
deleted file mode 100644
index 60694ff..000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java
+++ /dev/null
@@ -1,248 +0,0 @@
-package org.apache.phoenix.end2end;
-
-import com.google.common.collect.Maps;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.phoenix.hbase.index.Indexer;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
-import org.apache.phoenix.schema.types.PInteger;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.sql.*;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.*;
-
-public class SplitIT extends BaseUniqueNamesOwnClusterIT {
-private static final String SPLIT_TABLE_NAME_PREFIX = "SPLIT_TABLE_";
-private static boolean tableWasSplitDuringScannerNext = false;
-private static byte[] splitPoint = null;
-
-@BeforeClass
-public static void doSetup() throws Exception {
-Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(1);
-serverProps.put("hbase.coprocessor.region.classes", 
TestRegionObserver.class.getName());
-serverProps.put(Indexer.CHECK_VERSION_CONF_KEY, "false");
-Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(3);
-clientProps.put(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, 
Integer.toString(10));
-// read rows in batches 3 at time
-clientProps.put(QueryServices.SCAN_CACHE_SIZE_ATTRIB, 
Integer.toString(3));
-setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(clientProps.entrySet().iterator()));
-}
-
-public static class TestRegionObserver extends BaseRegionObserver {
-
-@Override
-public boolean postScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
-   final InternalScanner s, final List<Result> results, final int limit,
-   final boolean hasMore) throws IOException {
-Region region = c.getEnvironment().getRegion();
-String tableName = 
region.getRegionInfo().getTable().getNameAsString();
-if (tableName.startsWith(SPLIT_TABLE_NAME_PREFIX) && 
results.size()>1) {
-int pk = 
(Integer)PInteger.INSTANCE.toObject(results.get(0).getRow());
-// split when row 10 is read
-if (pk==10 && !tableWasSplitDuringScannerNext) {
-try {
-// split on the first row being scanned if splitPoint 
is null
-splitPoint = splitPoint!=null ? splitPoint : 
results.get(0).getRow();
-splitTable(splitPoint, TableName.valueOf(tableName));
-tableWasSplitDuringScannerNext = true;
-}
-catch (SQLException e) {
-throw new IOException(e);
-}
-}
-}
-return hasMore;
-}
-
-}
-
-public static void splitTable(byte[] splitPoint, TableName tableName) 
throws SQLException, IOException {
-Admin admin = driver.getConnectionQueryServices(getUrl(), 
TestUtil.TEST_PROPERTIES).getAdmin();
-int nRegions
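splitTable is truncated here; in outline it asks the HBase master for a split and waits for the new region to appear. A minimal sketch of that operation against the HBase 1.x Admin API (the helper name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    static void splitAndWait(Admin admin, TableName table, byte[] splitPoint)
            throws IOException, InterruptedException {
        int before = admin.getTableRegions(table).size();
        admin.split(table, splitPoint);          // asynchronous split request
        while (admin.getTableRegions(table).size() <= before) {
            Thread.sleep(100);                   // poll until the split completes
        }
    }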

[1/2] phoenix git commit: PHOENIX-5021 Fix failing tests in SplitIT in the 4.x-HBase-1.4 branch

2018-11-15 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 3b39feec5 -> 3d196a2e1


PHOENIX-5021 Fix failing tests in SplitIT in the 4.x-HBase-1.4 branch


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7328feb4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7328feb4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7328feb4

Branch: refs/heads/4.x-HBase-1.4
Commit: 7328feb4fc56099152ff2b175645f1b64ff2f73b
Parents: 3b39fee
Author: Thomas D'Silva 
Authored: Thu Nov 15 18:29:40 2018 -0800
Committer: Thomas D'Silva 
Committed: Thu Nov 15 19:03:28 2018 -0800

--
 .../org/apache/phoenix/end2end/SplitIT.java | 277 ---
 1 file changed, 277 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7328feb4/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java
deleted file mode 100644
index 482ad5a..000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SplitIT.java
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import com.google.common.collect.Maps;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.phoenix.hbase.index.Indexer;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
-import org.apache.phoenix.schema.types.PInteger;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.sql.*;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.*;
-
-public class SplitIT extends BaseUniqueNamesOwnClusterIT {
-private static final String SPLIT_TABLE_NAME_PREFIX = "SPLIT_TABLE_";
-private static boolean tableWasSplitDuringScannerNext = false;
-private static byte[] splitPoint = null;
-
-@BeforeClass
-public static void doSetup() throws Exception {
-Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(1);
-serverProps.put("hbase.coprocessor.region.classes", 
TestRegionObserver.class.getName());
-serverProps.put(Indexer.CHECK_VERSION_CONF_KEY, "false");
-Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(3);
-clientProps.put(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, 
Integer.toString(10));
-// read rows in batches 3 at time
-clientProps.put(QueryServices.SCAN_CACHE_SIZE_ATTRIB, 
Integer.toString(3));
-setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(clientProps.entrySet().iterator()));
-}
-
-public static class TestRegionObserver extends BaseRegionObserver {
-
-@Override
-public boolean postScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
-   final InternalScanner s, final List<Result> results, final int limit,
-   final boolean hasMore) throws IOException {
-Region region = c.getEnvironment().getRegion();
-String tableName = 
region.getRegionInfo().getTable().getNameAsString();
-if (tableName.startsWith(SPLIT_TABLE_NAME_PREFIX) && 
results.size()>1) {
-int pk = 
(Integer)PInteger.INSTANCE.toObject(results.get(0).getRow());
-// split when row 10 is read
-if (pk==10 && !tableWasSplitDuringScannerNext) {
-try

phoenix git commit: PHOENIX-5013 Increase timeout for Tephra discovery service

2018-11-15 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 4581516ea -> 89bbdfd9e


PHOENIX-5013 Increase timeout for Tephra discovery service


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/89bbdfd9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/89bbdfd9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/89bbdfd9

Branch: refs/heads/4.x-HBase-1.3
Commit: 89bbdfd9e0ad427d9e9771608e2d32b631319d2e
Parents: 4581516
Author: Thomas D'Silva 
Authored: Thu Nov 15 12:33:26 2018 -0800
Committer: Thomas D'Silva 
Committed: Thu Nov 15 19:11:49 2018 -0800

--
 .../test/java/org/apache/phoenix/query/QueryServicesTestImpl.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/89bbdfd9/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
index 49fb8e8..eae951a 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
@@ -130,6 +130,7 @@ public final class QueryServicesTestImpl extends 
BaseQueryServicesImpl {
 .set(TxConstants.Service.CFG_DATA_TX_CLIENT_RETRY_STRATEGY, 
"n-times")
 .set(TxConstants.Service.CFG_DATA_TX_CLIENT_ATTEMPTS, 1)
 .set(TxConstants.Service.CFG_DATA_TX_BIND_PORT, 
TestUtil.getRandomPort())
+
.set(TxConstants.Service.CFG_DATA_TX_CLIENT_DISCOVERY_TIMEOUT_SEC, 60)
 .set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, 
Files.createTempDir().getAbsolutePath())
 .set(TxConstants.Manager.CFG_TX_TIMEOUT, 
DEFAULT_TXN_TIMEOUT_SECONDS)
 .set(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL, 5L)
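The added line raises the Tephra client's service-discovery timeout to 60 seconds so that slow mini-cluster startups do not fail the transaction tests. Outside the test harness the same knob can be set on an ordinary Hadoop Configuration; a sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.tephra.TxConstants;

    // Sketch only: give the Tephra transaction service up to 60s to be discovered.
    Configuration conf = new Configuration();
    conf.setInt(TxConstants.Service.CFG_DATA_TX_CLIENT_DISCOVERY_TIMEOUT_SEC, 60);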



[2/2] phoenix git commit: PHOENIX-5013 Increase timeout for Tephra discovery service

2018-11-15 Thread tdsilva
PHOENIX-5013 Increase timeout for Tephra discovery service


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9e7da9a1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9e7da9a1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9e7da9a1

Branch: refs/heads/master
Commit: 9e7da9a176ab931ffe87fc4f47a908ee1ef31915
Parents: 316014f
Author: Thomas D'Silva 
Authored: Thu Nov 15 12:33:26 2018 -0800
Committer: Thomas D'Silva 
Committed: Thu Nov 15 19:12:11 2018 -0800

--
 .../test/java/org/apache/phoenix/query/QueryServicesTestImpl.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9e7da9a1/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
index 49fb8e8..eae951a 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
@@ -130,6 +130,7 @@ public final class QueryServicesTestImpl extends BaseQueryServicesImpl {
 .set(TxConstants.Service.CFG_DATA_TX_CLIENT_RETRY_STRATEGY, "n-times")
 .set(TxConstants.Service.CFG_DATA_TX_CLIENT_ATTEMPTS, 1)
 .set(TxConstants.Service.CFG_DATA_TX_BIND_PORT, TestUtil.getRandomPort())
+.set(TxConstants.Service.CFG_DATA_TX_CLIENT_DISCOVERY_TIMEOUT_SEC, 60)
 .set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, Files.createTempDir().getAbsolutePath())
 .set(TxConstants.Manager.CFG_TX_TIMEOUT, DEFAULT_TXN_TIMEOUT_SECONDS)
 .set(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL, 5L)



phoenix git commit: PHOENIX-5013 Increase timeout for Tephra discovery service

2018-11-15 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 73bec6fff -> 9e4e915f4


PHOENIX-5013 Increase timeout for Tephra discovery service


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9e4e915f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9e4e915f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9e4e915f

Branch: refs/heads/4.x-HBase-1.2
Commit: 9e4e915f45127d5eaa6b8746185fc6de214d7126
Parents: 73bec6f
Author: Thomas D'Silva 
Authored: Thu Nov 15 12:33:26 2018 -0800
Committer: Thomas D'Silva 
Committed: Thu Nov 15 19:11:36 2018 -0800

--
 .../test/java/org/apache/phoenix/query/QueryServicesTestImpl.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9e4e915f/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
index 49fb8e8..eae951a 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
@@ -130,6 +130,7 @@ public final class QueryServicesTestImpl extends BaseQueryServicesImpl {
 .set(TxConstants.Service.CFG_DATA_TX_CLIENT_RETRY_STRATEGY, "n-times")
 .set(TxConstants.Service.CFG_DATA_TX_CLIENT_ATTEMPTS, 1)
 .set(TxConstants.Service.CFG_DATA_TX_BIND_PORT, TestUtil.getRandomPort())
+.set(TxConstants.Service.CFG_DATA_TX_CLIENT_DISCOVERY_TIMEOUT_SEC, 60)
 .set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, Files.createTempDir().getAbsolutePath())
 .set(TxConstants.Manager.CFG_TX_TIMEOUT, DEFAULT_TXN_TIMEOUT_SECONDS)
 .set(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL, 5L)



[2/2] phoenix git commit: PHOENIX-5013 Increase timeout for Tephra discovery service

2018-11-15 Thread tdsilva
PHOENIX-5013 Increase timeout for Tephra discovery service


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3d196a2e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3d196a2e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3d196a2e

Branch: refs/heads/4.x-HBase-1.4
Commit: 3d196a2e173e9af983e0d602cd8f63a27bdf4315
Parents: 7328feb
Author: Thomas D'Silva 
Authored: Thu Nov 15 12:33:26 2018 -0800
Committer: Thomas D'Silva 
Committed: Thu Nov 15 19:11:58 2018 -0800

--
 .../test/java/org/apache/phoenix/query/QueryServicesTestImpl.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d196a2e/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
index 49fb8e8..eae951a 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
@@ -130,6 +130,7 @@ public final class QueryServicesTestImpl extends BaseQueryServicesImpl {
 .set(TxConstants.Service.CFG_DATA_TX_CLIENT_RETRY_STRATEGY, "n-times")
 .set(TxConstants.Service.CFG_DATA_TX_CLIENT_ATTEMPTS, 1)
 .set(TxConstants.Service.CFG_DATA_TX_BIND_PORT, TestUtil.getRandomPort())
+.set(TxConstants.Service.CFG_DATA_TX_CLIENT_DISCOVERY_TIMEOUT_SEC, 60)
 .set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, Files.createTempDir().getAbsolutePath())
 .set(TxConstants.Manager.CFG_TX_TIMEOUT, DEFAULT_TXN_TIMEOUT_SECONDS)
 .set(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL, 5L)



phoenix git commit: PHOENIX-4841 staging patch commit.

2018-11-15 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/master 6c286dbfe -> 3690c6323


PHOENIX-4841 staging patch commit.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3690c632
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3690c632
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3690c632

Branch: refs/heads/master
Commit: 3690c6323e3690e8811c326dc7c57c641e79aef5
Parents: 6c286db
Author: Daniel Wong 
Authored: Tue Oct 9 16:38:11 2018 -0700
Committer: Thomas D'Silva 
Committed: Thu Nov 15 14:21:24 2018 -0800

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java | 174 +--
 .../apache/phoenix/compile/WhereOptimizer.java  |  58 ++-
 .../expression/ComparisonExpression.java|  18 +-
 .../RowValueConstructorExpressionRewriter.java  |  54 ++
 .../org/apache/phoenix/schema/RowKeySchema.java |   4 +
 ...wValueConstructorExpressionRewriterTest.java |  78 +
 6 files changed, 363 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3690c632/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 528fe7f..9e7e144 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -17,11 +17,14 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Test;
 
 import java.sql.Connection;
 import java.sql.Date;
@@ -37,19 +40,18 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.Test;
-
-import com.google.common.collect.Lists;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 
 public class QueryMoreIT extends ParallelStatsDisabledIT {
 
+private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + TENANT_ID_ATTRIB + "=tenant1";
+
 private String dataTableName;
 //queryAgainstTenantSpecificView = true, dataTableSalted = true 
 @Test
@@ -511,4 +513,148 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 stmt.execute();
 }
 }
+
+@Test public void testRVCWithDescAndAscendingPK() throws Exception {
+final Connection conn = DriverManager.getConnection(getUrl());
+String fullTableName = generateUniqueName();
+try (Statement stmt = conn.createStatement()) {
+stmt.execute("CREATE TABLE " + fullTableName + "(\n"
++ "ORGANIZATION_ID CHAR(15) NOT NULL,\n" + "SCORE VARCHAR NOT NULL,\n"
++ "ENTITY_ID VARCHAR NOT NULL\n"
++ "CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n"
++ "ORGANIZATION_ID,\n" + "SCORE DESC,\n" + "ENTITY_ID\n"
++ ")\n" + ") MULTI_TENANT=TRUE");
+}
+
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','c','1')");
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','b','3')");
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','b','4')");
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','a','2')");
+conn.commit();
+
+try (Statement stmt = conn.createStatement()) {
+final ResultSet rs =
+stmt.executeQuery("SELECT score, entity_id \n" + "FROM " + fullTableName + "\n"
++ "WHERE organization

phoenix git commit: PHOENIX-4841 staging patch commit.

2018-11-15 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 4f8720b16 -> 590fec910


PHOENIX-4841 staging patch commit.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/590fec91
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/590fec91
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/590fec91

Branch: refs/heads/4.x-HBase-1.4
Commit: 590fec910051648bc71c577262eab3f978b659ca
Parents: 4f8720b
Author: Daniel Wong 
Authored: Tue Oct 9 16:38:11 2018 -0700
Committer: Thomas D'Silva 
Committed: Thu Nov 15 12:14:12 2018 -0800

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java | 171 +--
 .../apache/phoenix/compile/WhereOptimizer.java  |  58 ++-
 .../expression/ComparisonExpression.java|  18 +-
 .../RowValueConstructorExpressionRewriter.java  |  54 ++
 .../org/apache/phoenix/schema/RowKeySchema.java |   4 +
 ...wValueConstructorExpressionRewriterTest.java |  78 +
 6 files changed, 362 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/590fec91/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 04272fa..2b1d31e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -17,11 +17,13 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Test;
 
 import java.sql.Connection;
 import java.sql.Date;
@@ -37,18 +39,19 @@ import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.util.Base64;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.Test;
 
-import com.google.common.collect.Lists;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 
 public class QueryMoreIT extends ParallelStatsDisabledIT {
 
+private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + TENANT_ID_ATTRIB + "=tenant1";
+
 private String dataTableName;
 //queryAgainstTenantSpecificView = true, dataTableSalted = true 
 @Test
@@ -510,4 +513,148 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 stmt.execute();
 }
 }
+
+@Test public void testRVCWithDescAndAscendingPK() throws Exception {
+final Connection conn = DriverManager.getConnection(getUrl());
+String fullTableName = generateUniqueName();
+try (Statement stmt = conn.createStatement()) {
+stmt.execute("CREATE TABLE " + fullTableName + "(\n"
++ "ORGANIZATION_ID CHAR(15) NOT NULL,\n" + "SCORE VARCHAR NOT NULL,\n"
++ "ENTITY_ID VARCHAR NOT NULL\n"
++ "CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n"
++ "ORGANIZATION_ID,\n" + "SCORE DESC,\n" + "ENTITY_ID\n"
++ ")\n" + ") MULTI_TENANT=TRUE");
+}
+
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','c','1')");
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','b','3')");
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','b','4')");
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','a','2')");
+conn.commit();
+
+try (Statement stmt = conn.createStatement()) {
+final ResultSet rs =
+stmt.executeQuery("SELECT score, entity_id \n" + "FROM " + fullTableName + "\n"
++ "WHERE organization_id = 'org1'\n"
++ "AND (

phoenix git commit: PHOENIX-4841 staging patch commit.

2018-11-15 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 55bccf6b0 -> d35870043


PHOENIX-4841 staging patch commit.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d3587004
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d3587004
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d3587004

Branch: refs/heads/4.x-HBase-1.2
Commit: d358700432c1b6f080f6ec9cbab43ea6f244db85
Parents: 55bccf6
Author: Daniel Wong 
Authored: Tue Oct 9 16:38:11 2018 -0700
Committer: Thomas D'Silva 
Committed: Thu Nov 15 12:00:37 2018 -0800

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java | 171 +--
 .../apache/phoenix/compile/WhereOptimizer.java  |  58 ++-
 .../expression/ComparisonExpression.java|  18 +-
 .../RowValueConstructorExpressionRewriter.java  |  54 ++
 .../org/apache/phoenix/schema/RowKeySchema.java |   4 +
 ...wValueConstructorExpressionRewriterTest.java |  78 +
 6 files changed, 362 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3587004/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 04272fa..2b1d31e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -17,11 +17,13 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Test;
 
 import java.sql.Connection;
 import java.sql.Date;
@@ -37,18 +39,19 @@ import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.util.Base64;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.Test;
 
-import com.google.common.collect.Lists;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 
 public class QueryMoreIT extends ParallelStatsDisabledIT {
 
+private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + TENANT_ID_ATTRIB + "=tenant1";
+
 private String dataTableName;
 //queryAgainstTenantSpecificView = true, dataTableSalted = true 
 @Test
@@ -510,4 +513,148 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 stmt.execute();
 }
 }
+
+@Test public void testRVCWithDescAndAscendingPK() throws Exception {
+final Connection conn = DriverManager.getConnection(getUrl());
+String fullTableName = generateUniqueName();
+try (Statement stmt = conn.createStatement()) {
+stmt.execute("CREATE TABLE " + fullTableName + "(\n"
++ "ORGANIZATION_ID CHAR(15) NOT NULL,\n" + "SCORE VARCHAR NOT NULL,\n"
++ "ENTITY_ID VARCHAR NOT NULL\n"
++ "CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n"
++ "ORGANIZATION_ID,\n" + "SCORE DESC,\n" + "ENTITY_ID\n"
++ ")\n" + ") MULTI_TENANT=TRUE");
+}
+
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','c','1')");
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','b','3')");
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','b','4')");
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','a','2')");
+conn.commit();
+
+try (Statement stmt = conn.createStatement()) {
+final ResultSet rs =
+stmt.executeQuery("SELECT score, entity_id \n" + "FROM " + fullTableName + "\n"
++ "WHERE organization_id = 'org1'\n"
++ "AND (

phoenix git commit: PHOENIX-4841 staging patch commit.

2018-11-15 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 7b038fba5 -> 87e770296


PHOENIX-4841 staging patch commit.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/87e77029
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/87e77029
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/87e77029

Branch: refs/heads/4.x-HBase-1.3
Commit: 87e7702966ab7bc0afd52696e453f57ccc28b369
Parents: 7b038fb
Author: Daniel Wong 
Authored: Tue Oct 9 16:38:11 2018 -0700
Committer: Thomas D'Silva 
Committed: Thu Nov 15 12:00:51 2018 -0800

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java | 171 +--
 .../apache/phoenix/compile/WhereOptimizer.java  |  58 ++-
 .../expression/ComparisonExpression.java|  18 +-
 .../RowValueConstructorExpressionRewriter.java  |  54 ++
 .../org/apache/phoenix/schema/RowKeySchema.java |   4 +
 ...wValueConstructorExpressionRewriterTest.java |  78 +
 6 files changed, 362 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/87e77029/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 04272fa..2b1d31e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -17,11 +17,13 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Test;
 
 import java.sql.Connection;
 import java.sql.Date;
@@ -37,18 +39,19 @@ import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.util.Base64;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.Test;
 
-import com.google.common.collect.Lists;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 
 public class QueryMoreIT extends ParallelStatsDisabledIT {
 
+private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + TENANT_ID_ATTRIB + "=tenant1";
+
 private String dataTableName;
 //queryAgainstTenantSpecificView = true, dataTableSalted = true 
 @Test
@@ -510,4 +513,148 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 stmt.execute();
 }
 }
+
+@Test public void testRVCWithDescAndAscendingPK() throws Exception {
+final Connection conn = DriverManager.getConnection(getUrl());
+String fullTableName = generateUniqueName();
+try (Statement stmt = conn.createStatement()) {
+stmt.execute("CREATE TABLE " + fullTableName + "(\n"
++ "ORGANIZATION_ID CHAR(15) NOT NULL,\n" + "SCORE VARCHAR NOT NULL,\n"
++ "ENTITY_ID VARCHAR NOT NULL\n"
++ "CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n"
++ "ORGANIZATION_ID,\n" + "SCORE DESC,\n" + "ENTITY_ID\n"
++ ")\n" + ") MULTI_TENANT=TRUE");
+}
+
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','c','1')");
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','b','3')");
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','b','4')");
+conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1','a','2')");
+conn.commit();
+
+try (Statement stmt = conn.createStatement()) {
+final ResultSet rs =
+stmt.executeQuery("SELECT score, entity_id \n" + "FROM " + fullTableName + "\n"
++ "WHERE organization_id = 'org1'\n"
++ "AND (

phoenix git commit: PHOENIX-5008 (Addendum): CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2018-11-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 d30abc4b5 -> 55bccf6b0


PHOENIX-5008 (Addendum): CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/55bccf6b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/55bccf6b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/55bccf6b

Branch: refs/heads/4.x-HBase-1.2
Commit: 55bccf6b0abdcfa211f65d943371925137dbc068
Parents: d30abc4
Author: Chinmay Kulkarni 
Authored: Tue Nov 13 17:11:53 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 13 17:27:16 2018 -0800

--
 .../phoenix/end2end/SystemCatalogCreationOnConnectionIT.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/55bccf6b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index eadd391..7a5f80c 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -504,7 +504,7 @@ public class SystemCatalogCreationOnConnectionIT {
  */
 private Set<String> getHBaseTables() throws IOException {
 Set<String> tables = new HashSet<>();
-for (TableName tn : testUtil.getAdmin().listTableNames()) {
+for (TableName tn : testUtil.getHBaseAdmin().listTableNames()) {
 tables.add(tn.getNameAsString());
 }
 return tables;
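
On the HBase 1.x based Phoenix branches, HBaseTestingUtility exposes the admin through getHBaseAdmin(), which is why the addendum swaps the accessor used by this test helper. For reference, a self-contained sketch of the helper as it reads after the change, assuming hbase-testing-util is on the classpath:

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;

public class HBaseTableListerSketch {
    // Collects the names of all HBase tables visible to the mini cluster.
    static Set<String> getHBaseTables(HBaseTestingUtility testUtil) throws IOException {
        Set<String> tables = new HashSet<>();
        for (TableName tn : testUtil.getHBaseAdmin().listTableNames()) {
            tables.add(tn.getNameAsString());
        }
        return tables;
    }
}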



phoenix git commit: PHOENIX-5008 (Addendum): CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2018-11-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 b772b59b2 -> 4f8720b16


PHOENIX-5008 (Addendum): CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4f8720b1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4f8720b1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4f8720b1

Branch: refs/heads/4.x-HBase-1.4
Commit: 4f8720b16e724366cd6d15fef7609a8a09e812bf
Parents: b772b59
Author: Chinmay Kulkarni 
Authored: Tue Nov 13 17:11:53 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 13 17:27:36 2018 -0800

--
 .../phoenix/end2end/SystemCatalogCreationOnConnectionIT.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f8720b1/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index eadd391..7a5f80c 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -504,7 +504,7 @@ public class SystemCatalogCreationOnConnectionIT {
  */
 private Set<String> getHBaseTables() throws IOException {
 Set<String> tables = new HashSet<>();
-for (TableName tn : testUtil.getAdmin().listTableNames()) {
+for (TableName tn : testUtil.getHBaseAdmin().listTableNames()) {
 tables.add(tn.getNameAsString());
 }
 return tables;



phoenix git commit: PHOENIX-5008 (Addendum): CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2018-11-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 b2230b4f4 -> 7b038fba5


PHOENIX-5008 (Addendum): CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7b038fba
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7b038fba
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7b038fba

Branch: refs/heads/4.x-HBase-1.3
Commit: 7b038fba542173a91a7b919651ff067e1acc4fdf
Parents: b2230b4
Author: Chinmay Kulkarni 
Authored: Tue Nov 13 17:11:53 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 13 17:27:28 2018 -0800

--
 .../phoenix/end2end/SystemCatalogCreationOnConnectionIT.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7b038fba/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index eadd391..7a5f80c 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -504,7 +504,7 @@ public class SystemCatalogCreationOnConnectionIT {
  */
 private Set<String> getHBaseTables() throws IOException {
 Set<String> tables = new HashSet<>();
-for (TableName tn : testUtil.getAdmin().listTableNames()) {
+for (TableName tn : testUtil.getHBaseAdmin().listTableNames()) {
 tables.add(tn.getNameAsString());
 }
 return tables;



phoenix git commit: PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2018-11-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 edb18c639 -> b772b59b2


PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to 
client in case of an UpgradeRequiredException


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b772b59b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b772b59b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b772b59b

Branch: refs/heads/4.x-HBase-1.4
Commit: b772b59b2d821364ed5167e7bcdf1c8288cd9615
Parents: edb18c6
Author: Chinmay Kulkarni 
Authored: Fri Nov 9 19:22:57 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 13 16:42:11 2018 -0800

--
 .../SystemCatalogCreationOnConnectionIT.java| 97 +---
 .../query/ConnectionQueryServicesImpl.java  |  4 +-
 2 files changed, 84 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b772b59b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index a1685c44..eadd391 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -21,9 +21,11 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.apache.phoenix.query.BaseTest.generateUniqueName;
 
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -42,6 +44,7 @@ import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.UpgradeRequiredException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.query.ConnectionQueryServices;
@@ -69,6 +72,12 @@ public class SystemCatalogCreationOnConnectionIT {
 private static final String PHOENIX_SYSTEM_CATALOG = "SYSTEM.CATALOG";
 private static final String EXECUTE_UPGRADE_COMMAND = "EXECUTE UPGRADE";
 private static final String MODIFIED_MAX_VERSIONS ="5";
+private static final String CREATE_TABLE_STMT = "CREATE TABLE %s"
++ " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2))";
+private static final String SELECT_STMT = "SELECT * FROM %s";
+private static final String DELETE_STMT = "DELETE FROM %s";
+private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMY_IDX ON %s (K1) INCLUDE (K2)";
+private static final String UPSERT_STMT = "UPSERT INTO %s VALUES ('A', 'B')";
 
 private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList(
   "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
@@ -167,12 +176,8 @@ public class SystemCatalogCreationOnConnectionIT {
 UpgradeUtil.doNotUpgradeOnFirstConnection(propsDoNotUpgradePropSet);
 SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
   new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
-try {
-driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
-fail("Client should not be able to create SYSTEM.CATALOG since we set the doNotUpgrade property");
-} catch (Exception e) {
-assertTrue(e instanceof UpgradeRequiredException);
-}
+
+driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
 hbaseTables = getHBaseTables();
 assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) || hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
 assertTrue(hbaseTables.size() == 0);
@@ -428,6 +433,70 @@ public class SystemCatalogCreationOnConnectionIT {
 assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), verifyModificationTableMetadata(driver, PHOENIX_SYSTEM_CATALOG));
 }
 
+// Test the case when an end-user uses the vanilla PhoenixDriver to create a connection and a
+// requirement for upgrade is detected. In this case, the user should get a connection on which
+// they are only able to run "EXECUTE UPGRADE"
+@Test
+public v
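
The test method body is truncated above, but the comment spells out the new contract: instead of CQSI.init surfacing an UpgradeRequiredException, the caller now gets a restricted connection on which only "EXECUTE UPGRADE" succeeds. A minimal client-side sketch of that flow, with a placeholder JDBC URL:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ExecuteUpgradeSketch {
    public static void main(String[] args) throws Exception {
        // While an upgrade is pending, other statements on this connection
        // fail; the upgrade command below is permitted and migrates the
        // SYSTEM tables, after which a fresh connection should behave normally.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("EXECUTE UPGRADE");
        }
    }
}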

phoenix git commit: PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2018-11-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 c7a400933 -> b2230b4f4


PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to 
client in case of an UpgradeRequiredException


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b2230b4f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b2230b4f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b2230b4f

Branch: refs/heads/4.x-HBase-1.3
Commit: b2230b4f4bd7e8c2e9fd83e3fceb07a307521c9e
Parents: c7a4009
Author: Chinmay Kulkarni 
Authored: Fri Nov 9 19:22:57 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 13 16:42:06 2018 -0800

--
 .../SystemCatalogCreationOnConnectionIT.java| 97 +---
 .../query/ConnectionQueryServicesImpl.java  |  4 +-
 2 files changed, 84 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2230b4f/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index a1685c44..eadd391 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -21,9 +21,11 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.apache.phoenix.query.BaseTest.generateUniqueName;
 
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -42,6 +44,7 @@ import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.UpgradeRequiredException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.query.ConnectionQueryServices;
@@ -69,6 +72,12 @@ public class SystemCatalogCreationOnConnectionIT {
 private static final String PHOENIX_SYSTEM_CATALOG = "SYSTEM.CATALOG";
 private static final String EXECUTE_UPGRADE_COMMAND = "EXECUTE UPGRADE";
 private static final String MODIFIED_MAX_VERSIONS ="5";
+private static final String CREATE_TABLE_STMT = "CREATE TABLE %s"
++ " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2))";
+private static final String SELECT_STMT = "SELECT * FROM %s";
+private static final String DELETE_STMT = "DELETE FROM %s";
+private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMY_IDX ON %s (K1) INCLUDE (K2)";
+private static final String UPSERT_STMT = "UPSERT INTO %s VALUES ('A', 'B')";
 
 private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList(
   "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
@@ -167,12 +176,8 @@ public class SystemCatalogCreationOnConnectionIT {
 UpgradeUtil.doNotUpgradeOnFirstConnection(propsDoNotUpgradePropSet);
 SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
   new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
-try {
-driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
-fail("Client should not be able to create SYSTEM.CATALOG since we set the doNotUpgrade property");
-} catch (Exception e) {
-assertTrue(e instanceof UpgradeRequiredException);
-}
+
+driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
 hbaseTables = getHBaseTables();
 assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) || hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
 assertTrue(hbaseTables.size() == 0);
@@ -428,6 +433,70 @@ public class SystemCatalogCreationOnConnectionIT {
 assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), verifyModificationTableMetadata(driver, PHOENIX_SYSTEM_CATALOG));
 }
 
+// Test the case when an end-user uses the vanilla PhoenixDriver to create a connection and a
+// requirement for upgrade is detected. In this case, the user should get a connection on which
+// they are only able to run "EXECUTE UPGRADE"
+@Test
+public v

phoenix git commit: PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2018-11-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 78ecfce62 -> d30abc4b5


PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to 
client in case of an UpgradeRequiredException


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d30abc4b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d30abc4b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d30abc4b

Branch: refs/heads/4.x-HBase-1.2
Commit: d30abc4b5da2abd5396dd3a3a6cc2dea8079960c
Parents: 78ecfce
Author: Chinmay Kulkarni 
Authored: Fri Nov 9 19:22:57 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 13 16:42:01 2018 -0800

--
 .../SystemCatalogCreationOnConnectionIT.java| 97 +---
 .../query/ConnectionQueryServicesImpl.java  |  4 +-
 2 files changed, 84 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d30abc4b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index a1685c44..eadd391 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -21,9 +21,11 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.apache.phoenix.query.BaseTest.generateUniqueName;
 
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -42,6 +44,7 @@ import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.UpgradeRequiredException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.query.ConnectionQueryServices;
@@ -69,6 +72,12 @@ public class SystemCatalogCreationOnConnectionIT {
 private static final String PHOENIX_SYSTEM_CATALOG = "SYSTEM.CATALOG";
 private static final String EXECUTE_UPGRADE_COMMAND = "EXECUTE UPGRADE";
 private static final String MODIFIED_MAX_VERSIONS ="5";
+private static final String CREATE_TABLE_STMT = "CREATE TABLE %s"
++ " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2))";
+private static final String SELECT_STMT = "SELECT * FROM %s";
+private static final String DELETE_STMT = "DELETE FROM %s";
+private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMY_IDX ON %s (K1) INCLUDE (K2)";
+private static final String UPSERT_STMT = "UPSERT INTO %s VALUES ('A', 'B')";
 
 private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList(
   "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
@@ -167,12 +176,8 @@ public class SystemCatalogCreationOnConnectionIT {
 UpgradeUtil.doNotUpgradeOnFirstConnection(propsDoNotUpgradePropSet);
 SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
   new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
-try {
-driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
-fail("Client should not be able to create SYSTEM.CATALOG since we set the doNotUpgrade property");
-} catch (Exception e) {
-assertTrue(e instanceof UpgradeRequiredException);
-}
+
+driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
 hbaseTables = getHBaseTables();
 assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) || hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
 assertTrue(hbaseTables.size() == 0);
@@ -428,6 +433,70 @@ public class SystemCatalogCreationOnConnectionIT {
 assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), verifyModificationTableMetadata(driver, PHOENIX_SYSTEM_CATALOG));
 }
 
+// Test the case when an end-user uses the vanilla PhoenixDriver to create a connection and a
+// requirement for upgrade is detected. In this case, the user should get a connection on which
+// they are only able to run "EXECUTE UPGRADE"
+@Test
+public v

phoenix git commit: PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2018-11-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/master dfff4e8ca -> 6c286dbfe


PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to 
client in case of an UpgradeRequiredException


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6c286dbf
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6c286dbf
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6c286dbf

Branch: refs/heads/master
Commit: 6c286dbfe8f8e9c27def8c1d98cc22ab688e284e
Parents: dfff4e8
Author: Chinmay Kulkarni 
Authored: Fri Nov 9 19:22:57 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 13 16:41:53 2018 -0800

--
 .../SystemCatalogCreationOnConnectionIT.java| 97 +---
 .../query/ConnectionQueryServicesImpl.java  |  4 +-
 2 files changed, 84 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c286dbf/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index a1685c44..eadd391 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -21,9 +21,11 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.apache.phoenix.query.BaseTest.generateUniqueName;
 
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -42,6 +44,7 @@ import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.UpgradeRequiredException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.query.ConnectionQueryServices;
@@ -69,6 +72,12 @@ public class SystemCatalogCreationOnConnectionIT {
 private static final String PHOENIX_SYSTEM_CATALOG = "SYSTEM.CATALOG";
 private static final String EXECUTE_UPGRADE_COMMAND = "EXECUTE UPGRADE";
 private static final String MODIFIED_MAX_VERSIONS ="5";
+private static final String CREATE_TABLE_STMT = "CREATE TABLE %s"
++ " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2))";
+private static final String SELECT_STMT = "SELECT * FROM %s";
+private static final String DELETE_STMT = "DELETE FROM %s";
+private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMY_IDX ON %s (K1) INCLUDE (K2)";
+private static final String UPSERT_STMT = "UPSERT INTO %s VALUES ('A', 'B')";
 
 private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList(
   "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
@@ -167,12 +176,8 @@ public class SystemCatalogCreationOnConnectionIT {
 UpgradeUtil.doNotUpgradeOnFirstConnection(propsDoNotUpgradePropSet);
 SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
   new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
-try {
-driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
-fail("Client should not be able to create SYSTEM.CATALOG since we set the doNotUpgrade property");
-} catch (Exception e) {
-assertTrue(e instanceof UpgradeRequiredException);
-}
+
+driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
 hbaseTables = getHBaseTables();
 assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) || hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
 assertTrue(hbaseTables.size() == 0);
@@ -428,6 +433,70 @@ public class SystemCatalogCreationOnConnectionIT {
 assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), verifyModificationTableMetadata(driver, PHOENIX_SYSTEM_CATALOG));
 }
 
+// Test the case when an end-user uses the vanilla PhoenixDriver to create a connection and a
+// requirement for upgrade is detected. In this case, the user should get a connection on which
+// they are only able to run "EXECUTE UPGRADE"
+@Test
+public void testExecut

phoenix git commit: PHOENIX-5017 Fix testRecreateViewWhoseParentWasDropped test flapper

2018-11-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 8cd7898a1 -> c7a400933


PHOENIX-5017 Fix testRecreateViewWhoseParentWasDropped test flapper


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c7a40093
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c7a40093
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c7a40093

Branch: refs/heads/4.x-HBase-1.3
Commit: c7a400933c25de8324b19f085ea267e0ff11e5a2
Parents: 8cd7898
Author: Thomas D'Silva 
Authored: Tue Nov 13 15:42:19 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 13 15:43:45 2018 -0800

--
 .../phoenix/coprocessor/MetaDataEndpointImpl.java   | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c7a40093/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index d899e32..5562340 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2035,8 +2035,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 }
 }
 
-// check if the table was dropped, but had child views that were have not yet
-// been cleaned up by compaction
+// check if the table was dropped, but had child views that have not yet been cleaned up
 if (!Bytes.toString(schemaName).equals(QueryConstants.SYSTEM_SCHEMA_NAME)) {
 dropChildViews(env, tenantIdBytes, schemaName, tableName);
 }
@@ -2434,8 +2433,13 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 MetaDataClient client = new MetaDataClient(connection);
 org.apache.phoenix.parse.TableName viewTableName = org.apache.phoenix.parse.TableName
 .create(Bytes.toString(viewSchemaName), Bytes.toString(viewName));
-client.dropTable(
-new DropTableStatement(viewTableName, PTableType.VIEW, false, true, true));
+try {
+client.dropTable(
+new DropTableStatement(viewTableName, PTableType.VIEW, false, true, true));
+}
+catch (TableNotFoundException e) {
+logger.info("Ignoring view "+viewTableName+" as it has already been dropped");
+}
 }
 }
 }
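
The try/catch makes the child-view cleanup idempotent: if another cleanup (or an earlier attempt) already dropped the view, the TableNotFoundException is logged and ignored instead of failing the metadata call, which appears to be why testRecreateViewWhoseParentWasDropped flapped. A sketch of the client-visible scenario, with placeholder table and view names and a placeholder URL:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class RecreateDroppedParentSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE T1 (K VARCHAR PRIMARY KEY, V VARCHAR)");
            stmt.execute("CREATE VIEW V1 AS SELECT * FROM T1");
            // Dropping the parent leaves the child view to be cleaned up lazily.
            stmt.execute("DROP TABLE T1 CASCADE");
            // Recreating the table triggers dropChildViews for the leftover
            // view; with the fix, a view that is already gone is ignored.
            stmt.execute("CREATE TABLE T1 (K VARCHAR PRIMARY KEY, V VARCHAR)");
        }
    }
}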



phoenix git commit: PHOENIX-5017 Fix testRecreateViewWhoseParentWasDropped test flapper

2018-11-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/master 4b4466f9b -> dfff4e8ca


PHOENIX-5017 Fix testRecreateViewWhoseParentWasDropped test flapper


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/dfff4e8c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/dfff4e8c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/dfff4e8c

Branch: refs/heads/master
Commit: dfff4e8ca0dd8dd94b261508bd77c2ed74091f83
Parents: 4b4466f
Author: Thomas D'Silva 
Authored: Tue Nov 13 15:42:19 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 13 16:35:37 2018 -0800

--
 .../phoenix/coprocessor/MetaDataEndpointImpl.java   | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/dfff4e8c/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 462e853..e925c45 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2039,8 +2039,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
 }
 }
 
-// check if the table was dropped, but had child views that were have not yet
-// been cleaned up by compaction
+// check if the table was dropped, but had child views that have not yet been cleaned up
 if (!Bytes.toString(schemaName).equals(QueryConstants.SYSTEM_SCHEMA_NAME)) {
 dropChildViews(env, tenantIdBytes, schemaName, tableName);
 }
@@ -2452,8 +2451,13 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
 MetaDataClient client = new MetaDataClient(connection);
 org.apache.phoenix.parse.TableName viewTableName = org.apache.phoenix.parse.TableName
 .create(Bytes.toString(viewSchemaName), Bytes.toString(viewName));
-client.dropTable(
-new DropTableStatement(viewTableName, PTableType.VIEW, false, true, true));
+try {
+client.dropTable(
+new DropTableStatement(viewTableName, PTableType.VIEW, false, true, true));
+}
+catch (TableNotFoundException e) {
+logger.info("Ignoring view "+viewTableName+" as it has already been dropped");
+}
 }
 }
 }



phoenix git commit: PHOENIX-5017 Fix testRecreateViewWhoseParentWasDropped test flapper

2018-11-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 ce89c2c1d -> edb18c639


PHOENIX-5017 Fix testRecreateViewWhoseParentWasDropped test flapper


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/edb18c63
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/edb18c63
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/edb18c63

Branch: refs/heads/4.x-HBase-1.4
Commit: edb18c6392aeb197e3d3b96d8d72ede4476f46a2
Parents: ce89c2c
Author: Thomas D'Silva 
Authored: Tue Nov 13 15:42:19 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 13 16:34:31 2018 -0800

--
 .../phoenix/coprocessor/MetaDataEndpointImpl.java   | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/edb18c63/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index d899e32..5562340 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2035,8 +2035,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 }
 }
 
-// check if the table was dropped, but had child views that were have not yet
-// been cleaned up by compaction
+// check if the table was dropped, but had child views that have not yet been cleaned up
 if (!Bytes.toString(schemaName).equals(QueryConstants.SYSTEM_SCHEMA_NAME)) {
 dropChildViews(env, tenantIdBytes, schemaName, tableName);
 }
@@ -2434,8 +2433,13 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 MetaDataClient client = new MetaDataClient(connection);
 org.apache.phoenix.parse.TableName viewTableName = org.apache.phoenix.parse.TableName
 .create(Bytes.toString(viewSchemaName), Bytes.toString(viewName));
-client.dropTable(
-new DropTableStatement(viewTableName, PTableType.VIEW, false, true, true));
+try {
+client.dropTable(
+new DropTableStatement(viewTableName, PTableType.VIEW, false, true, true));
+}
+catch (TableNotFoundException e) {
+logger.info("Ignoring view "+viewTableName+" as it has already been dropped");
+}
 }
 }
 }



phoenix git commit: PHOENIX-5017 Fix testRecreateViewWhoseParentWasDropped test flapper

2018-11-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 798aaeedb -> 78ecfce62


PHOENIX-5017 Fix testRecreateViewWhoseParentWasDropped test flapper


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/78ecfce6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/78ecfce6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/78ecfce6

Branch: refs/heads/4.x-HBase-1.2
Commit: 78ecfce6276561f163e5b98d5d80cafecd217a9b
Parents: 798aaee
Author: Thomas D'Silva 
Authored: Tue Nov 13 15:42:19 2018 -0800
Committer: Thomas D'Silva 
Committed: Tue Nov 13 16:34:14 2018 -0800

--
 .../phoenix/coprocessor/MetaDataEndpointImpl.java   | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/78ecfce6/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index d899e32..5562340 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2035,8 +2035,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             }
         }
 
-        // check if the table was dropped, but had child views that were have not yet
-        // been cleaned up by compaction
+        // check if the table was dropped, but had child views that were have not yet been cleaned up
         if (!Bytes.toString(schemaName).equals(QueryConstants.SYSTEM_SCHEMA_NAME)) {
             dropChildViews(env, tenantIdBytes, schemaName, tableName);
         }
@@ -2434,8 +2433,13 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 MetaDataClient client = new MetaDataClient(connection);
                 org.apache.phoenix.parse.TableName viewTableName = org.apache.phoenix.parse.TableName
                         .create(Bytes.toString(viewSchemaName), Bytes.toString(viewName));
-                client.dropTable(
-                        new DropTableStatement(viewTableName, PTableType.VIEW, false, true, true));
+                try {
+                    client.dropTable(
+                            new DropTableStatement(viewTableName, PTableType.VIEW, false, true, true));
+                }
+                catch (TableNotFoundException e) {
+                    logger.info("Ignoring view "+viewTableName+" as it has already been dropped");
+                }
             }
         }
     }



phoenix git commit: Revert "PHOENIX-4971 Drop index will execute successfully using Incorrect name of parent tables"

2018-11-07 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 ee8db198a -> 131ff09b2


Revert "PHOENIX-4971 Drop index will execute successfully using Incorrect name 
of parent tables"

This reverts commit 7b7e4d4b286fa9a7bb857ea7ff3cd3bb3812eaf5.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/131ff09b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/131ff09b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/131ff09b

Branch: refs/heads/4.x-HBase-1.4
Commit: 131ff09b23032c1b5271b49050c7eacd63ef5a34
Parents: ee8db19
Author: Thomas D'Silva 
Authored: Wed Nov 7 11:10:46 2018 -0800
Committer: Thomas D'Silva 
Committed: Wed Nov 7 11:10:46 2018 -0800

--
 .../phoenix/end2end/index/DropMetadataIT.java   | 24 +---
 .../phoenix/exception/SQLExceptionCode.java |  2 --
 .../apache/phoenix/schema/MetaDataClient.java   | 15 
 3 files changed, 1 insertion(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/131ff09b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
index 44eddfb..b92ed8d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
@@ -18,12 +18,9 @@
 package org.apache.phoenix.end2end.index;
 
 import static org.apache.phoenix.util.TestUtil.HBASE_NATIVE_SCHEMA_NAME;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
-import java.sql.SQLException;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -32,7 +29,6 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
-import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -40,7 +36,6 @@ import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
-
 import org.junit.Test;
 
 public class DropMetadataIT extends ParallelStatsDisabledIT {
@@ -61,24 +56,7 @@ public class DropMetadataIT extends ParallelStatsDisabledIT {
         String url = QueryUtil.getConnectionUrl(props, config, PRINCIPAL);
         return DriverManager.getConnection(url, props);
     }
-
-    @Test
-    public void testDropIndexTableHasSameNameWithDataTable() {
-        String tableName = generateUniqueName();
-        String indexName = "IDX_" + tableName;
-        try (Connection conn = DriverManager.getConnection(getUrl())) {
-            String createTable = "CREATE TABLE " + tableName + "  (id varchar not null primary key, col integer)";
-            conn.createStatement().execute(createTable);
-            String createIndex = "CREATE INDEX " + indexName + " on " + tableName + "(col)";
-            conn.createStatement().execute(createIndex);
-            String dropIndex = "DROP INDEX " + indexName + " on " + indexName;
-            conn.createStatement().execute(dropIndex);
-            fail("should not execute successfully");
-        } catch (SQLException e) {
-            assertTrue(SQLExceptionCode.PARENT_TABLE_NOT_FOUND.getErrorCode() == e.getErrorCode());
-        }
-    }
-
+
     @Test
     public void testDropViewKeepsHTable() throws Exception {
         Connection conn = getConnection();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/131ff09b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 5bffed5..d557714 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -185,8 +185,6 @@ public enum SQLExceptionCode {
 INVALID_REPLAY_AT(533, "42910", "Value of REPLAY_AT cannot be less than zero."),
 UNEQUAL_SCN_AND_BUILD_INDEX_AT(534, "42911", "If both specified, values of CURRENT_SCN and BUILD_INDEX_AT must be equal."),
 ONLY_INDEX_UPDATABLE_AT_SCN(535, "42912", "Only an index may be updated when the BUILD_INDEX_AT property is specified"),
- PARENT_TABLE_NOT_FOUND(536, "42913", "Can't drop the index because the parent table in the DROP statement is incorrect."),
-

phoenix git commit: Revert "PHOENIX-4971 Drop index will execute successfully using Incorrect name of parent tables"

2018-11-07 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 d6083ae5c -> fd7ae9406


Revert "PHOENIX-4971 Drop index will execute successfully using Incorrect name 
of parent tables"

This reverts commit 527098ed621d62ba18fc19099ec67d8b29e027f4.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fd7ae940
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fd7ae940
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fd7ae940

Branch: refs/heads/4.x-HBase-1.3
Commit: fd7ae94065ae33f1bb20bca60b7756595f8b10b9
Parents: d6083ae
Author: Thomas D'Silva 
Authored: Wed Nov 7 11:10:05 2018 -0800
Committer: Thomas D'Silva 
Committed: Wed Nov 7 11:10:05 2018 -0800

--
 .../phoenix/end2end/index/DropMetadataIT.java   | 23 +---
 .../phoenix/exception/SQLExceptionCode.java |  2 --
 .../apache/phoenix/schema/MetaDataClient.java   | 15 -
 3 files changed, 1 insertion(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd7ae940/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
index 3c670c5..b92ed8d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
@@ -18,12 +18,9 @@
 package org.apache.phoenix.end2end.index;
 
 import static org.apache.phoenix.util.TestUtil.HBASE_NATIVE_SCHEMA_NAME;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
-import java.sql.SQLException;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -32,7 +29,6 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
-import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -60,24 +56,7 @@ public class DropMetadataIT extends ParallelStatsDisabledIT {
         String url = QueryUtil.getConnectionUrl(props, config, PRINCIPAL);
         return DriverManager.getConnection(url, props);
     }
-
-    @Test
-    public void testDropIndexTableHasSameNameWithDataTable() {
-        String tableName = generateUniqueName();
-        String indexName = "IDX_" + tableName;
-        try (Connection conn = DriverManager.getConnection(getUrl())) {
-            String createTable = "CREATE TABLE " + tableName + "  (id varchar not null primary key, col integer)";
-            conn.createStatement().execute(createTable);
-            String createIndex = "CREATE INDEX " + indexName + " on " + tableName + "(col)";
-            conn.createStatement().execute(createIndex);
-            String dropIndex = "DROP INDEX " + indexName + " on " + indexName;
-            conn.createStatement().execute(dropIndex);
-            fail("should not execute successfully");
-        } catch (SQLException e) {
-            assertTrue(SQLExceptionCode.PARENT_TABLE_NOT_FOUND.getErrorCode() == e.getErrorCode());
-        }
-    }
-
+
     @Test
     public void testDropViewKeepsHTable() throws Exception {
         Connection conn = getConnection();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fd7ae940/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 5bffed5..d557714 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -185,8 +185,6 @@ public enum SQLExceptionCode {
 INVALID_REPLAY_AT(533, "42910", "Value of REPLAY_AT cannot be less than zero."),
 UNEQUAL_SCN_AND_BUILD_INDEX_AT(534, "42911", "If both specified, values of CURRENT_SCN and BUILD_INDEX_AT must be equal."),
 ONLY_INDEX_UPDATABLE_AT_SCN(535, "42912", "Only an index may be updated when the BUILD_INDEX_AT property is specified"),
- PARENT_TABLE_NOT_FOUND(536, "42913", "Can't drop the index because the parent table in the DROP statement is incorrect."),
-
  /**
  * HBase and Phoenix specific implement
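
For context on what this revert restores: the removed test issued DROP INDEX with the wrong parent table named in the ON clause and expected error 536 (PARENT_TABLE_NOT_FOUND). A minimal JDBC sketch of that scenario, under the assumption of a running Phoenix cluster; the connection URL and table names are placeholders.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class DropIndexWrongParent {
        public static void main(String[] args) throws SQLException {
            // Placeholder URL; requires the Phoenix JDBC driver on the classpath.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE T1 (ID VARCHAR NOT NULL PRIMARY KEY, COL INTEGER)");
                stmt.execute("CREATE INDEX IDX_T1 ON T1 (COL)");
                // The ON clause deliberately names the wrong parent: IDX_T1's
                // parent is T1, not IDX_T1. PHOENIX-4971 rejected this with
                // error 536; this revert removes that check and the error code.
                stmt.execute("DROP INDEX IDX_T1 ON IDX_T1");
            }
        }
    }

With PHOENIX-4971 in place the final statement failed with PARENT_TABLE_NOT_FOUND; after the revert that check and its dedicated error code no longer exist.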
