[phoenix] branch master updated (8732c13f74 -> 2cebb20886)

2022-05-10 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


from 8732c13f74 PHOENIX-6699 Phoenix metrics overwriting 
DefaultMetricsSystem in RegionServers (addendum: set up hbase prefix for ITs)
 add 2cebb20886 PHOENIX-6710 Revert PHOENIX-3842 Turn on back default 
bloomFilter for Phoenix Tables (#1436)

No new revisions were added by this update.

Summary of changes:
 .../src/it/java/org/apache/phoenix/end2end/CreateTableIT.java   | 6 +++---
 .../java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java  | 4 +---
 .../org/apache/phoenix/schema/tool/SchemaExtractionProcessor.java   | 6 --
 3 files changed, 8 insertions(+), 8 deletions(-)
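
As background on what this revert toggles: BLOOMFILTER is a standard HBase column-family property that Phoenix DDL can pass through, so a table can pin its bloom filter setting explicitly and stay independent of whichever default applies. The sketch below is illustrative only; the JDBC URL and table name are placeholders, not code from this commit.

import java.sql.Connection;
import java.sql.DriverManager;

// Hedged sketch: pinning the HBase BLOOMFILTER column-family property in
// Phoenix DDL keeps a table's behavior fixed regardless of the default
// that PHOENIX-6710 / PHOENIX-3842 turn on or off.
public class BloomFilterSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn =
                DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.createStatement().execute(
                    "CREATE TABLE IF NOT EXISTS MY_TABLE ("
                    + "ID VARCHAR PRIMARY KEY, VAL VARCHAR"
                    + ") BLOOMFILTER='ROW'");
        }
    }
}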



svn commit: r46318 - /release/incubator/tephra/

2021-02-24 Thread ankit
Author: ankit
Date: Thu Feb 25 01:58:17 2021
New Revision: 46318

Log:
PHOENIX-6393 Removing incubator tephra obsolete releases.

Removed:
release/incubator/tephra/



svn commit: r46317 - /release/incubator/omid/

2021-02-24 Thread ankit
Author: ankit
Date: Thu Feb 25 01:56:26 2021
New Revision: 46317

Log:
PHOENIX-6391 Removing incubator omid obsolete releases.

Removed:
release/incubator/omid/



svn commit: r46316 - /dev/incubator/omid/

2021-02-24 Thread ankit
Author: ankit
Date: Thu Feb 25 01:51:26 2021
New Revision: 46316

Log:
PHOENIX-6391 Removing incubator omid dev directory.

Removed:
dev/incubator/omid/



svn commit: r46315 - /dev/incubator/tephra/

2021-02-24 Thread ankit
Author: ankit
Date: Thu Feb 25 01:49:20 2021
New Revision: 46315

Log:
PHOENIX-6393 Removing tephra dev directory.

Removed:
dev/incubator/tephra/



svn commit: r46314 - in /dev/incubator/tephra: 0.14.0-incubating-rc1/ 0.15.0-incubating-rc1/ KEYS

2021-02-24 Thread ankit
Author: ankit
Date: Thu Feb 25 01:48:21 2021
New Revision: 46314

Log:
PHOENIX-6393 Removing RC artifacts.

Removed:
dev/incubator/tephra/0.14.0-incubating-rc1/
dev/incubator/tephra/0.15.0-incubating-rc1/
dev/incubator/tephra/KEYS



svn commit: r46313 - /dev/incubator/tephra/0.13.0-incubating-rc2/

2021-02-24 Thread ankit
Author: ankit
Date: Thu Feb 25 01:45:42 2021
New Revision: 46313

Log:
PHOENIX-6393 Removing RC artifacts.

Removed:
dev/incubator/tephra/0.13.0-incubating-rc2/



[phoenix] branch 4.x updated: PHOENIX-6196 Update phoenix.mutate.maxSizeBytes to accept long values (#930)

2020-10-20 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 2ff920a  PHOENIX-6196 Update phoenix.mutate.maxSizeBytes to accept 
long values (#930)
2ff920a is described below

commit 2ff920aac0002fead70317a8ecd4f51d04f191eb
Author: Ankit Singhal 
AuthorDate: Tue Oct 20 12:52:38 2020 -0700

PHOENIX-6196 Update phoenix.mutate.maxSizeBytes to accept long values (#930)
---
 .../org/apache/phoenix/execute/PartialCommitIT.java|  2 +-
 .../org/apache/phoenix/compile/DeleteCompiler.java | 16 
 .../org/apache/phoenix/compile/UpsertCompiler.java | 18 +-
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  4 ++--
 4 files changed, 20 insertions(+), 20 deletions(-)
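
For context on the diff below, here is a minimal, self-contained sketch of why the property read moves from getInt to getLong: a configured byte limit above Integer.MAX_VALUE (about 2 GiB) cannot be represented in an int at all. Plain java.util.Properties stands in for Phoenix's ReadOnlyProps, and the default value is illustrative, not Phoenix's actual constant.

import java.util.Properties;

// Sketch of the int-vs-long behavior PHOENIX-6196 addresses for
// phoenix.mutate.maxSizeBytes. Only the attribute name comes from the
// commit; the rest is illustrative.
public class MaxSizeBytesSketch {
    static final String MAX_MUTATION_SIZE_BYTES_ATTRIB = "phoenix.mutate.maxSizeBytes";
    static final long DEFAULT_MAX_MUTATION_SIZE_BYTES = 104857600L; // illustrative 100 MiB

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty(MAX_MUTATION_SIZE_BYTES_ATTRIB, "4294967296"); // 4 GiB

        String configured = props.getProperty(MAX_MUTATION_SIZE_BYTES_ATTRIB);

        // Pre-patch behavior: an int-sized read of a 4 GiB limit fails outright.
        try {
            int asInt = Integer.parseInt(configured);
            System.out.println("int read: " + asInt);
        } catch (NumberFormatException e) {
            System.out.println("int read fails: 4294967296 exceeds Integer.MAX_VALUE");
        }

        // Post-patch behavior: a long read represents limits beyond 2 GiB.
        long maxSizeBytes = configured != null
                ? Long.parseLong(configured)
                : DEFAULT_MAX_MUTATION_SIZE_BYTES;
        System.out.println("long read: " + maxSizeBytes);
    }
}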

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
index 2e15487..6e13564 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
@@ -269,7 +269,7 @@ public class PartialCommitIT extends 
BaseUniqueNamesOwnClusterIT {
 // passing a null mutation state forces the 
connection.newMutationState() to be used to create the MutationState
 return new PhoenixConnection(con, (MutationState)null) {
 @Override
-protected MutationState newMutationState(int maxSize, int 
maxSizeBytes) {
+protected MutationState newMutationState(int maxSize, long 
maxSizeBytes) {
 return new MutationState(maxSize, maxSizeBytes, this, 
mutations, false, null);
 };
 };
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 948f076..a2e76ba 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -141,7 +141,7 @@ public class DeleteCompiler {
 final boolean autoFlush = connection.getAutoCommit() || 
tableRef.getTable().isTransactional();
 ConnectionQueryServices services = connection.getQueryServices();
 final int maxSize = 
services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
-final int maxSizeBytes = 
services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
+final long maxSizeBytes = 
services.getProps().getLong(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
 final int batchSize = Math.min(connection.getMutateBatchSize(), 
maxSize);
 MultiRowMutationState mutations = new MultiRowMutationState(batchSize);
 List otherMutations = null;
@@ -568,7 +568,7 @@ public class DeleteCompiler {
 }
 
 final int maxSize = 
services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
-final int maxSizeBytes = 
services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
+final long maxSizeBytes = 
services.getProps().getLong(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
  
 // If we're doing a query for a set of rows with no where clause, then 
we don't need to contact the server at all.
 if (noQueryReqd) {
@@ -648,9 +648,9 @@ public class DeleteCompiler {
 private final PhoenixConnection connection;
 private final int maxSize;
 private final StatementContext context;
-private final int maxSizeBytes;
+private final long maxSizeBytes;
 
-public SingleRowDeleteMutationPlan(QueryPlan dataPlan, 
PhoenixConnection connection, int maxSize, int maxSizeBytes) {
+public SingleRowDeleteMutationPlan(QueryPlan dataPlan, 
PhoenixConnection connection, int maxSize, long maxSizeBytes) {
 this.dataPlan = dataPlan;
 this.connection = connection;
 this.maxSize = maxSize;
@@ -732,10 +732,10 @@ public class DeleteCompiler {
 private final QueryPlan aggPlan;
 private final RowProjector projector;
 private final int maxSize;
-private final int maxSizeBytes;
+private final long maxSizeBytes;
 
 public ServerSelectDeleteMutationPlan(QueryPlan dataPlan, 
PhoenixConnection connection, QueryPlan aggPlan,
-  RowProjector projector, int 
maxSize, int ma

[phoenix] branch master updated: PHOENIX-6196 Update phoenix.mutate.maxSizeBytes to accept long values (#930)

2020-10-20 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new a5b2337  PHOENIX-6196 Update phoenix.mutate.maxSizeBytes to accept 
long values (#930)
a5b2337 is described below

commit a5b2337b8618ae247dc33e56c4d531e0f80b392d
Author: Ankit Singhal 
AuthorDate: Tue Oct 20 12:52:38 2020 -0700

PHOENIX-6196 Update phoenix.mutate.maxSizeBytes to accept long values (#930)
---
 .../org/apache/phoenix/execute/PartialCommitIT.java|  2 +-
 .../org/apache/phoenix/compile/DeleteCompiler.java | 16 
 .../org/apache/phoenix/compile/UpsertCompiler.java | 18 +-
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  4 ++--
 4 files changed, 20 insertions(+), 20 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
index ee24ca9..e452da4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
@@ -270,7 +270,7 @@ public class PartialCommitIT extends 
BaseUniqueNamesOwnClusterIT {
 // passing a null mutation state forces the 
connection.newMutationState() to be used to create the MutationState
 return new PhoenixConnection(con, (MutationState)null) {
 @Override
-protected MutationState newMutationState(int maxSize, int 
maxSizeBytes) {
+protected MutationState newMutationState(int maxSize, long 
maxSizeBytes) {
 return new MutationState(maxSize, maxSizeBytes, this, 
mutations, false, null);
 };
 };
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index c682665..f8013fa 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -142,7 +142,7 @@ public class DeleteCompiler {
 final boolean autoFlush = connection.getAutoCommit() || 
tableRef.getTable().isTransactional();
 ConnectionQueryServices services = connection.getQueryServices();
 final int maxSize = 
services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
-final int maxSizeBytes = 
services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
+final long maxSizeBytes = 
services.getProps().getLong(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
 final int batchSize = Math.min(connection.getMutateBatchSize(), 
maxSize);
 MultiRowMutationState mutations = new MultiRowMutationState(batchSize);
 List otherMutations = null;
@@ -569,7 +569,7 @@ public class DeleteCompiler {
 }
 
 final int maxSize = 
services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
-final int maxSizeBytes = 
services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
+final long maxSizeBytes = 
services.getProps().getLong(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
  
 // If we're doing a query for a set of rows with no where clause, then 
we don't need to contact the server at all.
 if (noQueryReqd) {
@@ -649,9 +649,9 @@ public class DeleteCompiler {
 private final PhoenixConnection connection;
 private final int maxSize;
 private final StatementContext context;
-private final int maxSizeBytes;
+private final long maxSizeBytes;
 
-public SingleRowDeleteMutationPlan(QueryPlan dataPlan, 
PhoenixConnection connection, int maxSize, int maxSizeBytes) {
+public SingleRowDeleteMutationPlan(QueryPlan dataPlan, 
PhoenixConnection connection, int maxSize, long maxSizeBytes) {
 this.dataPlan = dataPlan;
 this.connection = connection;
 this.maxSize = maxSize;
@@ -733,10 +733,10 @@ public class DeleteCompiler {
 private final QueryPlan aggPlan;
 private final RowProjector projector;
 private final int maxSize;
-private final int maxSizeBytes;
+private final long maxSizeBytes;
 
 public ServerSelectDeleteMutationPlan(QueryPlan dataPlan, 
PhoenixConnection connection, QueryPlan aggPlan,
-  RowProjector projector, int 
maxSize, int ma

svn commit: r1882167 - in /phoenix/site: publish/ publish/language/ source/src/site/markdown/

2020-09-30 Thread ankit
Author: ankit
Date: Wed Sep 30 20:51:52 2020
New Revision: 1882167

URL: http://svn.apache.org/viewvc?rev=1882167&view=rev
Log:
PHOENIX-6014 Add documentation for PHOENIX-2715 (addendum: Remove advanced 
config of disruptor, formatting changes)

Modified:
phoenix/site/publish/array_type.html
phoenix/site/publish/cursors.html
phoenix/site/publish/develop.html
phoenix/site/publish/dynamic_columns.html
phoenix/site/publish/index.html
phoenix/site/publish/joins.html
phoenix/site/publish/language/datatypes.html
phoenix/site/publish/language/functions.html
phoenix/site/publish/language/index.html
phoenix/site/publish/multi-tenancy.html
phoenix/site/publish/news.html
phoenix/site/publish/paged.html
phoenix/site/publish/pig_integration.html
phoenix/site/publish/python.html
phoenix/site/publish/release.html
phoenix/site/publish/resources.html
phoenix/site/publish/roadmap.html
phoenix/site/publish/salted.html
phoenix/site/publish/sequences.html
phoenix/site/publish/skip_scan.html
phoenix/site/publish/source.html
phoenix/site/publish/tablesample.html
phoenix/site/publish/tuning.html
phoenix/site/publish/tuning_guide.html
phoenix/site/publish/udf.html
phoenix/site/source/src/site/markdown/tuning.md

[phoenix] branch 4.x updated: PHOENIX-6034 Optimize InListIT (#838)

2020-08-25 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 7575123  PHOENIX-6034 Optimize InListIT (#838)
7575123 is described below

commit 7575123f5638d2e85bc017322b945ab9e246ad49
Author: Ankit Singhal 
AuthorDate: Tue Aug 25 21:40:55 2020 -0700

PHOENIX-6034 Optimize InListIT (#838)
---
 .../java/org/apache/phoenix/end2end/InListIT.java  | 156 ++---
 1 file changed, 107 insertions(+), 49 deletions(-)
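
The core of the optimization in the diff below is a one-time-setup pattern: run the expensive DDL once per class behind a static guard, and only delete data between tests. A self-contained JUnit 4 sketch of that pattern follows (hypothetical method names, not the actual InListIT code):

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// Sketch of the one-time-setup pattern this commit applies to InListIT:
// expensive schema creation runs once per class behind a static flag,
// while the cheap per-test cleanup runs after every test.
public class OneTimeSetupSketch {
    private static boolean isInitialized = false;

    @Before
    public void setup() throws Exception {
        if (isInitialized) {
            return;             // schema was already built by an earlier test
        }
        createTablesAndViews(); // expensive: runs once for the whole class
        isInitialized = true;
    }

    @After
    public void cleanUp() throws Exception {
        deleteAllRows();        // cheap: keeps tests isolated without re-running DDL
    }

    @Test
    public void testUsesSharedSchema() {
        // queries against the shared tables and views go here
    }

    private void createTablesAndViews() { /* CREATE TABLE / CREATE VIEW ... */ }

    private void deleteAllRows() { /* DELETE FROM each table and view ... */ }
}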

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
index b0aee8f..c64fa79 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
@@ -26,15 +26,18 @@ import static org.junit.Assert.assertTrue;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
-import java.sql.PreparedStatement;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
+import com.google.common.base.Function;
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.iterate.ExplainTable;
 import org.apache.phoenix.schema.SortOrder;
@@ -43,27 +46,39 @@ import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import com.google.common.base.Function;
-import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
-
 
 public class InListIT extends ParallelStatsDisabledIT {
 private static final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + 
TENANT_ID_ATTRIB + "=tenant1";
-String tableName;
-String descViewName;
-String ascViewName;
+private static boolean isInitialized = false;
+private static String tableName = generateUniqueName();
+private static String tableName2 = generateUniqueName();
+private static String descViewName = generateUniqueName();
+private static String ascViewName = generateUniqueName();
+private static String viewName1 = generateUniqueName();
+private static String viewName2 = generateUniqueName();
+private static String prefix = generateUniqueName();
 
 @Before
 public void setup() throws Exception {
-tableName = generateUniqueName();
-descViewName = generateUniqueName();
-ascViewName = generateUniqueName();
-buildSchema(tableName, generateUniqueName(), true);
-buildSchema(generateUniqueName(), generateUniqueName(), false);
+if(isInitialized){
+return;
+}
+initializeTables();
+isInitialized = true;
+}
+
+@After
+public void cleanUp() throws SQLException {
+deleteTenantData(descViewName);
+deleteTenantData(viewName1);
+deleteTenantData(viewName2);
+deleteTenantData(ascViewName);
+deleteTenantData(tableName);
+deleteTenantData(tableName2);
 }
 
 @Test
@@ -163,7 +178,7 @@ public class InListIT extends ParallelStatsDisabledIT {
  * @return  the table or view name that should be used to access the 
created table
  */
 private static String initializeAndGetTable(Connection baseConn, 
Connection conn, boolean isMultiTenant, PDataType pkType, int saltBuckets) 
throws SQLException {
-String tableName = generateUniqueName() + "in_test" + 
pkType.getSqlTypeName() + saltBuckets + (isMultiTenant ? "_multi" : "_single");
+String tableName = getTableName(isMultiTenant, pkType, saltBuckets);
 String tableDDL = createTableDDL(tableName, pkType, saltBuckets, 
isMultiTenant);
 baseConn.createStatement().execute(tableDDL);
 
@@ -179,6 +194,12 @@ public class InListIT extends ParallelStatsDisabledIT {
 }
 }
 
+private static String getTableName(boolean isMultiTenant, PDataType 
pkType, int saltBuckets) {
+return prefix+"init_in_test_" + pkType.getSqlTypeName() + saltBuckets 
+ (isMultiTenant ?
+"_multi" :
+"_single");
+}
+
 private static final String TENANT_ID = "ABC";
 private static final String TENANT_URL = getUrl() + ";" + 
PhoenixRuntime.TENANT_ID_ATTRIB + '=' + TENANT_ID;
 
@@ -189,15 +210,51 @@ public class InListIT extends ParallelStatsDisabledIT {
 
 private static final List HINTS = Arrays.asList("/*+ SK

[phoenix] branch master updated: PHOENIX-6034 Optimize InListIT (#838)

2020-08-25 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 674efb9  PHOENIX-6034 Optimize InListIT (#838)
674efb9 is described below

commit 674efb9e39f02e63872fdf1651723106b7bcfc1d
Author: Ankit Singhal 
AuthorDate: Tue Aug 25 21:40:55 2020 -0700

PHOENIX-6034 Optimize InListIT (#838)
---
 .../java/org/apache/phoenix/end2end/InListIT.java  | 156 ++---
 1 file changed, 107 insertions(+), 49 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
index 9e3c40a..93d645f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
@@ -26,15 +26,18 @@ import static org.junit.Assert.assertTrue;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
-import java.sql.PreparedStatement;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
+import com.google.common.base.Function;
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.iterate.ExplainTable;
 import org.apache.phoenix.schema.SortOrder;
@@ -43,27 +46,39 @@ import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import com.google.common.base.Function;
-import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
-
 
 public class InListIT extends ParallelStatsDisabledIT {
 private static final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + 
TENANT_ID_ATTRIB + "=tenant1";
-String tableName;
-String descViewName;
-String ascViewName;
+private static boolean isInitialized = false;
+private static String tableName = generateUniqueName();
+private static String tableName2 = generateUniqueName();
+private static String descViewName = generateUniqueName();
+private static String ascViewName = generateUniqueName();
+private static String viewName1 = generateUniqueName();
+private static String viewName2 = generateUniqueName();
+private static String prefix = generateUniqueName();
 
 @Before
 public void setup() throws Exception {
-tableName = generateUniqueName();
-descViewName = generateUniqueName();
-ascViewName = generateUniqueName();
-buildSchema(tableName, generateUniqueName(), true);
-buildSchema(generateUniqueName(), generateUniqueName(), false);
+if(isInitialized){
+return;
+}
+initializeTables();
+isInitialized = true;
+}
+
+@After
+public void cleanUp() throws SQLException {
+deleteTenantData(descViewName);
+deleteTenantData(viewName1);
+deleteTenantData(viewName2);
+deleteTenantData(ascViewName);
+deleteTenantData(tableName);
+deleteTenantData(tableName2);
 }
 
 @Test
@@ -163,7 +178,7 @@ public class InListIT extends ParallelStatsDisabledIT {
  * @return  the table or view name that should be used to access the 
created table
  */
 private static String initializeAndGetTable(Connection baseConn, 
Connection conn, boolean isMultiTenant, PDataType pkType, int saltBuckets) 
throws SQLException {
-String tableName = generateUniqueName() + "in_test" + 
pkType.getSqlTypeName() + saltBuckets + (isMultiTenant ? "_multi" : "_single");
+String tableName = getTableName(isMultiTenant, pkType, saltBuckets);
 String tableDDL = createTableDDL(tableName, pkType, saltBuckets, 
isMultiTenant);
 baseConn.createStatement().execute(tableDDL);
 
@@ -179,6 +194,12 @@ public class InListIT extends ParallelStatsDisabledIT {
 }
 }
 
+private static String getTableName(boolean isMultiTenant, PDataType 
pkType, int saltBuckets) {
+return prefix+"init_in_test_" + pkType.getSqlTypeName() + saltBuckets 
+ (isMultiTenant ?
+"_multi" :
+"_single");
+}
+
 private static final String TENANT_ID = "ABC";
 private static final String TENANT_URL = getUrl() + ";" + 
PhoenixRuntime.TENANT_ID_ATTRIB + '=' + TENANT_ID;
 
@@ -189,15 +210,51 @@ public class InListIT extends ParallelStatsDisabledIT {
 
 private static final List HINTS = Arr

[phoenix] branch 4.x updated: PHOENIX-6023 Wrong result when issuing query for an immutable table with multiple column families (#833)

2020-07-20 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new eb3875a  PHOENIX-6023 Wrong result when issuing query for an immutable 
table with multiple column families (#833)
eb3875a is described below

commit eb3875a5d0e559a96ef126a36751a9e226316d98
Author: Toshihiro Suzuki 
AuthorDate: Mon Jul 20 17:14:56 2020 -0700

PHOENIX-6023 Wrong result when issuing query for an immutable table with 
multiple column families (#833)

Signed-off-by: Ankit Singhal 
---
 .../apache/phoenix/end2end/ImmutableTableIT.java   | 101 +
 .../org/apache/phoenix/compile/WhereCompiler.java  |  33 +--
 2 files changed, 128 insertions(+), 6 deletions(-)
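
Both archived copies of this message truncate the second test, so here is a hypothetical condensed repro of the bug class: an immutable table whose query projects one column family while filtering on another. The JDBC URL, names, and the explicit A./B. families are assumptions for this sketch, not the committed test.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Hypothetical condensed repro: before the fix, filtering on B.COL2 while
// projecting A.COL1 could return wrong results for an immutable table.
public class ImmutableMultiCfSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS T ("
                    + "ID VARCHAR PRIMARY KEY, A.COL1 VARCHAR, B.COL2 VARCHAR"
                    + ") IMMUTABLE_ROWS = TRUE");
            stmt.execute("UPSERT INTO T VALUES ('id0', '0', 'a')");
            stmt.execute("UPSERT INTO T VALUES ('id1', '1', NULL)");
            conn.commit();
            try (ResultSet rs = stmt.executeQuery(
                    "SELECT COL1 FROM T WHERE COL2 IS NOT NULL")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1)); // expect only '0'
                }
            }
        }
    }
}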

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTableIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTableIT.java
new file mode 100644
index 0000000..9ae505d
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTableIT.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you maynot use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+import static org.junit.Assert.assertEquals;
+
+public class ImmutableTableIT extends ParallelStatsDisabledIT {
+
+@Test
+public void 
testQueryWithMultipleColumnFamiliesAndSingleConditionForImmutableTable()
+throws Exception {
+final String tn = generateUniqueName();
+final String url = getUrl();
+try (Connection conn = DriverManager.getConnection(url);
+Statement stmt = conn.createStatement()) {
+stmt.execute(String.format("CREATE TABLE %s (" +
+"ID VARCHAR PRIMARY KEY," +
+"COL1 VARCHAR," +
+"COL2 VARCHAR" +
+") IMMUTABLE_ROWS = TRUE", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id0', '0', 
'a')", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id1', '1', 
NULL)", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id2', '2', 
'b')", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id3', '3', 
NULL)", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id4', '4', 
'c')", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id5', '5', 
NULL)", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id6', '6', 
'd')", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id7', '7', 
NULL)", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id8', '8', 
'e')", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id9', '9', 
NULL)", tn));
+conn.commit();
+
+try (ResultSet rs = stmt.executeQuery(String.format(
+"SELECT COL1 FROM %s WHERE COL2 IS NOT NULL", tn))) {
+int count = 0;
+while (rs.next()) {
+  count++;
+}
+assertEquals(5, count);
+}
+}
+}
+
+@Test
+public void 
testQueryWithMultipleColumnFamiliesAndMultipleConditionsForImmutableTable()
+throws Exception {
+final String tn = generateUniqueName();
+final String url = getUrl();
+try (Connection conn = DriverManager.getConnection(url);
+Statement stmt = conn.createStatement()) {
+stmt.execute(String.format("CREATE TABLE %s (" +
+"ID VAR

[phoenix] branch master updated: PHOENIX-6023 Wrong result when issuing query for an immutable table with multiple column families (#833)

2020-07-20 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 5904404  PHOENIX-6023 Wrong result when issuing query for an immutable 
table with multiple column families (#833)
5904404 is described below

commit 59044045189f275c75938d441e7a7d74e7670005
Author: Toshihiro Suzuki 
AuthorDate: Tue Jul 21 09:09:28 2020 +0900

PHOENIX-6023 Wrong result when issuing query for an immutable table with 
multiple column families (#833)
---
 .../apache/phoenix/end2end/ImmutableTableIT.java   | 101 +
 .../org/apache/phoenix/compile/WhereCompiler.java  |  33 +--
 2 files changed, 128 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTableIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTableIT.java
new file mode 100644
index 0000000..9ae505d
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTableIT.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you maynot use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+import static org.junit.Assert.assertEquals;
+
+public class ImmutableTableIT extends ParallelStatsDisabledIT {
+
+@Test
+public void 
testQueryWithMultipleColumnFamiliesAndSingleConditionForImmutableTable()
+throws Exception {
+final String tn = generateUniqueName();
+final String url = getUrl();
+try (Connection conn = DriverManager.getConnection(url);
+Statement stmt = conn.createStatement()) {
+stmt.execute(String.format("CREATE TABLE %s (" +
+"ID VARCHAR PRIMARY KEY," +
+"COL1 VARCHAR," +
+"COL2 VARCHAR" +
+") IMMUTABLE_ROWS = TRUE", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id0', '0', 
'a')", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id1', '1', 
NULL)", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id2', '2', 
'b')", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id3', '3', 
NULL)", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id4', '4', 
'c')", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id5', '5', 
NULL)", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id6', '6', 
'd')", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id7', '7', 
NULL)", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id8', '8', 
'e')", tn));
+stmt.execute(String.format("UPSERT INTO %s VALUES ('id9', '9', 
NULL)", tn));
+conn.commit();
+
+try (ResultSet rs = stmt.executeQuery(String.format(
+"SELECT COL1 FROM %s WHERE COL2 IS NOT NULL", tn))) {
+int count = 0;
+while (rs.next()) {
+  count++;
+}
+assertEquals(5, count);
+}
+}
+}
+
+@Test
+public void 
testQueryWithMultipleColumnFamiliesAndMultipleConditionsForImmutableTable()
+throws Exception {
+final String tn = generateUniqueName();
+final String url = getUrl();
+try (Connection conn = DriverManager.getConnection(url);
+Statement stmt = conn.createStatement()) {
+stmt.execute(String.format("CREATE TABLE %s (" +
+"ID VARCHAR PRIMARY

[phoenix] branch 4.x updated: PHOENIX-5884 Join query return empty result when filters for both the tables are present(addendum)

2020-06-12 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 1587be4  PHOENIX-5884 Join query return empty result when filters for 
both the tables are present(addendum)
1587be4 is described below

commit 1587be4fef2d1b5c13733eee0e4bc0d922117da9
Author: Ankit Singhal 
AuthorDate: Fri Jun 12 10:50:10 2020 -0700

PHOENIX-5884 Join query return empty result when filters for both the 
tables are present(addendum)
---
 .../end2end/join/WhereOptimizerForJoinFiltersIT.java| 17 +
 1 file changed, 17 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/WhereOptimizerForJoinFiltersIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/WhereOptimizerForJoinFiltersIT.java
index 33f1bd8..989cf16 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/WhereOptimizerForJoinFiltersIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/WhereOptimizerForJoinFiltersIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.end2end.join;
 
 import static org.junit.Assert.assertEquals;



[phoenix] branch master updated: PHOENIX-5884 Join query return empty result when filters for both the tables are present

2020-06-12 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 7e955ec  PHOENIX-5884 Join query return empty result when filters for 
both the tables are present
7e955ec is described below

commit 7e955ecbcf967e74fbb37e5d0c9c5556ebac5898
Author: Ankit Singhal 
AuthorDate: Fri Jun 12 10:47:43 2020 -0700

PHOENIX-5884 Join query return empty result when filters for both the 
tables are present
---
 .../end2end/join/WhereOptimizerForJoinFiltersIT.java| 17 +
 1 file changed, 17 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/WhereOptimizerForJoinFiltersIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/WhereOptimizerForJoinFiltersIT.java
index 33f1bd8..989cf16 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/WhereOptimizerForJoinFiltersIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/WhereOptimizerForJoinFiltersIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.end2end.join;
 
 import static org.junit.Assert.assertEquals;



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5594 Different permission of phoenix-*-queryserver.log from umask (Toshihiro Suzuki)

2019-11-29 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 61d504f  PHOENIX-5594 Different permission of 
phoenix-*-queryserver.log from umask (Toshihiro Suzuki)
61d504f is described below

commit 61d504f85805b5304d49b31fb120569da1165d33
Author: Toshihiro Suzuki 
AuthorDate: Fri Nov 29 22:05:04 2019 +0900

PHOENIX-5594 Different permission of phoenix-*-queryserver.log from umask 
(Toshihiro Suzuki)
---
 bin/queryserver.py | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/bin/queryserver.py b/bin/queryserver.py
index 0c07b3b..26d096c 100755
--- a/bin/queryserver.py
+++ b/bin/queryserver.py
@@ -148,6 +148,10 @@ if command == 'start':
 print >> sys.stderr, "daemon mode not supported on this platform"
 sys.exit(-1)
 
+# get the current umask for the sub process
+current_umask = os.umask(0)
+os.umask(current_umask)
+
 # run in the background
 d = os.path.dirname(out_file_path)
 if not os.path.exists(d):
@@ -171,8 +175,12 @@ if command == 'start':
 sys.exit(0)
 signal.signal(signal.SIGTERM, handler)
 
+def initsubproc():
+# set the parent's umask
+os.umask(current_umask)
+
 print '%s launching %s' % (datetime.datetime.now(), cmd)
-child = subprocess.Popen(cmd.split())
+child = subprocess.Popen(cmd.split(), preexec_fn=initsubproc)
 sys.exit(child.wait())
 
 elif command == 'stop':



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5594 Different permission of phoenix-*-queryserver.log from umask (Toshihiro Suzuki)

2019-11-29 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 786aded  PHOENIX-5594 Different permission of 
phoenix-*-queryserver.log from umask (Toshihiro Suzuki)
786aded is described below

commit 786aded5776807d4d092fd315db021f1d6052327
Author: Toshihiro Suzuki 
AuthorDate: Fri Nov 29 22:05:04 2019 +0900

PHOENIX-5594 Different permission of phoenix-*-queryserver.log from umask 
(Toshihiro Suzuki)
---
 bin/queryserver.py | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/bin/queryserver.py b/bin/queryserver.py
index 0c07b3b..26d096c 100755
--- a/bin/queryserver.py
+++ b/bin/queryserver.py
@@ -148,6 +148,10 @@ if command == 'start':
 print >> sys.stderr, "daemon mode not supported on this platform"
 sys.exit(-1)
 
+# get the current umask for the sub process
+current_umask = os.umask(0)
+os.umask(current_umask)
+
 # run in the background
 d = os.path.dirname(out_file_path)
 if not os.path.exists(d):
@@ -171,8 +175,12 @@ if command == 'start':
 sys.exit(0)
 signal.signal(signal.SIGTERM, handler)
 
+def initsubproc():
+# set the parent's umask
+os.umask(current_umask)
+
 print '%s launching %s' % (datetime.datetime.now(), cmd)
-child = subprocess.Popen(cmd.split())
+child = subprocess.Popen(cmd.split(), preexec_fn=initsubproc)
 sys.exit(child.wait())
 
 elif command == 'stop':



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5594 Different permission of phoenix-*-queryserver.log from umask (Toshihiro Suzuki)

2019-11-29 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new b41d44e  PHOENIX-5594 Different permission of 
phoenix-*-queryserver.log from umask (Toshihiro Suzuki)
b41d44e is described below

commit b41d44e6e72c2482bb91579fc81f8c11d4ef3818
Author: Toshihiro Suzuki 
AuthorDate: Fri Nov 29 22:05:04 2019 +0900

PHOENIX-5594 Different permission of phoenix-*-queryserver.log from umask 
(Toshihiro Suzuki)
---
 bin/queryserver.py | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/bin/queryserver.py b/bin/queryserver.py
index 0c07b3b..26d096c 100755
--- a/bin/queryserver.py
+++ b/bin/queryserver.py
@@ -148,6 +148,10 @@ if command == 'start':
 print >> sys.stderr, "daemon mode not supported on this platform"
 sys.exit(-1)
 
+# get the current umask for the sub process
+current_umask = os.umask(0)
+os.umask(current_umask)
+
 # run in the background
 d = os.path.dirname(out_file_path)
 if not os.path.exists(d):
@@ -171,8 +175,12 @@ if command == 'start':
 sys.exit(0)
 signal.signal(signal.SIGTERM, handler)
 
+def initsubproc():
+# set the parent's umask
+os.umask(current_umask)
+
 print '%s launching %s' % (datetime.datetime.now(), cmd)
-child = subprocess.Popen(cmd.split())
+child = subprocess.Popen(cmd.split(), preexec_fn=initsubproc)
 sys.exit(child.wait())
 
 elif command == 'stop':



[phoenix] branch master updated: PHOENIX-5594 Different permission of phoenix-*-queryserver.log from umask (Toshihiro Suzuki)

2019-11-29 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new c8c3136  PHOENIX-5594 Different permission of 
phoenix-*-queryserver.log from umask (Toshihiro Suzuki)
c8c3136 is described below

commit c8c31365fa67b7deb293b526ef0c96a7e878adbd
Author: Toshihiro Suzuki 
AuthorDate: Fri Nov 29 22:05:04 2019 +0900

PHOENIX-5594 Different permission of phoenix-*-queryserver.log from umask 
(Toshihiro Suzuki)
---
 bin/queryserver.py | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/bin/queryserver.py b/bin/queryserver.py
index 0c07b3b..26d096c 100755
--- a/bin/queryserver.py
+++ b/bin/queryserver.py
@@ -148,6 +148,10 @@ if command == 'start':
 print >> sys.stderr, "daemon mode not supported on this platform"
 sys.exit(-1)
 
+# get the current umask for the sub process
+current_umask = os.umask(0)
+os.umask(current_umask)
+
 # run in the background
 d = os.path.dirname(out_file_path)
 if not os.path.exists(d):
@@ -171,8 +175,12 @@ if command == 'start':
 sys.exit(0)
 signal.signal(signal.SIGTERM, handler)
 
+def initsubproc():
+# set the parent's umask
+os.umask(current_umask)
+
 print '%s launching %s' % (datetime.datetime.now(), cmd)
-child = subprocess.Popen(cmd.split())
+child = subprocess.Popen(cmd.split(), preexec_fn=initsubproc)
 sys.exit(child.wait())
 
 elif command == 'stop':



[phoenix-connectors] branch master updated (4a4308a -> 3d5241f)

2019-11-13 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix-connectors.git.


from 4a4308a  PHOENIX-5410 Phoenix spark to hbase connector takes long time 
persist data
 add 3d5241f  PHOENIX-5552 Hive against Phoenix gets "Expecting "RPAREN", 
got "L" in Tez mode"

No new revisions were added by this update.

Summary of changes:
 .../constants/PhoenixStorageHandlerConstants.java  |  43 --
 .../phoenix/hive/mapreduce/PhoenixInputFormat.java |  43 +-
 .../phoenix/hive/query/PhoenixQueryBuilder.java| 461 -
 .../hive/query/PhoenixQueryBuilderTest.java|  19 +
 4 files changed, 33 insertions(+), 533 deletions(-)
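
The parser error in the subject is what a stray Java literal suffix inside generated SQL produces: a clause such as "ID IN (100L)" stops Phoenix's parser at the 'L'. The sketch below illustrates that failure class and a safe rendering; it is not the PhoenixQueryBuilder fix itself, and all names are illustrative.

// Illustrative sketch: rendering long constants through StringBuilder
// yields "100", never "100L", so the generated SQL stays parseable.
public class LiteralRenderingSketch {
    static String inClause(String column, long... values) {
        StringBuilder sb = new StringBuilder(column).append(" IN (");
        for (int i = 0; i < values.length; i++) {
            if (i > 0) {
                sb.append(", ");
            }
            sb.append(values[i]); // appends the digits only, no 'L' suffix
        }
        return sb.append(')').toString();
    }

    public static void main(String[] args) {
        System.out.println(inClause("ID", 100L, 200L)); // ID IN (100, 200)
    }
}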



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5506 Psql load fails with lower table name

2019-10-07 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 9d15cc6  PHOENIX-5506 Psql load fails with lower table name
9d15cc6 is described below

commit 9d15cc62a2dd0b00fc38258f73740e88177949cd
Author: Ankit Singhal 
AuthorDate: Mon Oct 7 11:17:29 2019 -0700

PHOENIX-5506 Psql load fails with lower table name
---
 .../apache/phoenix/end2end/CSVCommonsLoaderIT.java | 35 ++
 .../org/apache/phoenix/util/CSVCommonsLoader.java  |  3 +-
 2 files changed, 37 insertions(+), 1 deletion(-)
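
The one-line fix in the diff below routes the table name through SchemaUtil.getEscapedFullTableName before building the upsert. As a minimal sketch of the underlying rule (ignoring schema prefixes and the other cases the real SchemaUtil logic handles): Phoenix folds unquoted identifiers to upper case, so a lower-case table name resolves only when it is double-quoted.

// Minimal sketch, not SchemaUtil's actual implementation: quote an
// identifier unless it is already quoted, preserving its case.
public class EscapeTableNameSketch {
    static String escape(String tableName) {
        return tableName.startsWith("\"") ? tableName : "\"" + tableName + "\"";
    }

    public static void main(String[] args) {
        System.out.println(escape("mytable"));     // -> "mytable" (case preserved)
        System.out.println(escape("\"mytable\"")); // already quoted: unchanged
    }
}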

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
index 9b5581d..8db7afd 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
@@ -23,7 +23,9 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.StringReader;
+import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -35,6 +37,7 @@ import java.util.Properties;
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.csv.CSVParser;
 import org.apache.commons.csv.CSVRecord;
+import org.apache.commons.io.FileUtils;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.schema.IllegalDataException;
@@ -43,7 +46,9 @@ import org.apache.phoenix.schema.types.PArrayDataType;
 import org.apache.phoenix.util.CSVCommonsLoader;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 
 public class CSVCommonsLoaderIT extends ParallelStatsDisabledIT {
 
@@ -98,6 +103,8 @@ public class CSVCommonsLoaderIT extends 
ParallelStatsDisabledIT {
 + "\n"
 + CSV_VALUES_BAD_ENCAPSULATED_CONTROL_CHARS;
 
+@Rule public TemporaryFolder tempFolder = new TemporaryFolder();
+
 @Test
 public void testCSVCommonsUpsert() throws Exception {
 CSVParser parser = null;
@@ -766,4 +773,32 @@ public class CSVCommonsLoaderIT extends 
ParallelStatsDisabledIT {
 }
 
 }
+
+@Test public void testLowerCaseTable() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+conn.setAutoCommit(true);
+String tableName = generateUniqueName().toLowerCase();
+String t1 = generateUniqueName();
+String t2 = t1 + generateUniqueName();
+String csvFileName = "test.csv";
+conn.createStatement().execute("CREATE TABLE \"" + tableName
++ "\" (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY 
KEY(K1,K2))");
+File tempFile = tempFolder.newFile(csvFileName);
+FileUtils.writeStringToFile(tempFile, "'" + t1 + "','x'");
+try {
+CSVCommonsLoader csvLoader =
+new CSVCommonsLoader(conn.unwrap(PhoenixConnection.class), "" 
+ tableName + "",
+null, false, ',', '"', '\\', null);
+csvLoader.upsert(tempFile.getAbsolutePath());
+} catch (Exception e) {
+fail("Failed with Exception:" + e.getMessage());
+}
+ResultSet rs =
+conn.createStatement().executeQuery("SELECT * FROM \"" + tableName 
+ "\" order by k2");
+assertTrue(rs.next());
+assertEquals("'"+t1+"'",rs.getString(1));
+assertEquals("'"+"x"+"'",rs.getString(2));
+assertFalse(rs.next());
+
+}
 }
\ No newline at end of file
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
index 024e3cd..bd98bff 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
@@ -211,7 +211,8 @@ public class CSVCommonsLoader {
 long start = System.currentTimeMillis();
 CsvUpsertListener upsertListener = new CsvUpsertListener(conn,
 conn.getMutateBatchSize(), isStrict);
-CsvUpsertExecutor csvUpsertExecutor = new CsvUpsertExecutor(conn, 
tableName,
+CsvUpsertExecutor csvUpsertExecutor = new CsvUpsertExecutor(conn,
+SchemaUtil.getEscapedFullTableName(tableName),
 columnInfoList, upsertListener, arrayElementSeparator);
 
 csvUpsertExecutor.execute(csvParser);



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5506 Psql load fails with lower table name

2019-10-07 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 252ab1d  PHOENIX-5506 Psql load fails with lower table name
252ab1d is described below

commit 252ab1d0dfe76496db6afdc8f3c3dc6ef17493bb
Author: Ankit Singhal 
AuthorDate: Mon Oct 7 11:17:29 2019 -0700

PHOENIX-5506 Psql load fails with lower table name
---
 .../apache/phoenix/end2end/CSVCommonsLoaderIT.java | 35 ++
 .../org/apache/phoenix/util/CSVCommonsLoader.java  |  3 +-
 2 files changed, 37 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
index 9b5581d..8db7afd 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
@@ -23,7 +23,9 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.StringReader;
+import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -35,6 +37,7 @@ import java.util.Properties;
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.csv.CSVParser;
 import org.apache.commons.csv.CSVRecord;
+import org.apache.commons.io.FileUtils;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.schema.IllegalDataException;
@@ -43,7 +46,9 @@ import org.apache.phoenix.schema.types.PArrayDataType;
 import org.apache.phoenix.util.CSVCommonsLoader;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 
 public class CSVCommonsLoaderIT extends ParallelStatsDisabledIT {
 
@@ -98,6 +103,8 @@ public class CSVCommonsLoaderIT extends 
ParallelStatsDisabledIT {
 + "\n"
 + CSV_VALUES_BAD_ENCAPSULATED_CONTROL_CHARS;
 
+@Rule public TemporaryFolder tempFolder = new TemporaryFolder();
+
 @Test
 public void testCSVCommonsUpsert() throws Exception {
 CSVParser parser = null;
@@ -766,4 +773,32 @@ public class CSVCommonsLoaderIT extends 
ParallelStatsDisabledIT {
 }
 
 }
+
+@Test public void testLowerCaseTable() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+conn.setAutoCommit(true);
+String tableName = generateUniqueName().toLowerCase();
+String t1 = generateUniqueName();
+String t2 = t1 + generateUniqueName();
+String csvFileName = "test.csv";
+conn.createStatement().execute("CREATE TABLE \"" + tableName
++ "\" (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY 
KEY(K1,K2))");
+File tempFile = tempFolder.newFile(csvFileName);
+FileUtils.writeStringToFile(tempFile, "'" + t1 + "','x'");
+try {
+CSVCommonsLoader csvLoader =
+new CSVCommonsLoader(conn.unwrap(PhoenixConnection.class), "" 
+ tableName + "",
+null, false, ',', '"', '\\', null);
+csvLoader.upsert(tempFile.getAbsolutePath());
+} catch (Exception e) {
+fail("Failed with Exception:" + e.getMessage());
+}
+ResultSet rs =
+conn.createStatement().executeQuery("SELECT * FROM \"" + tableName 
+ "\" order by k2");
+assertTrue(rs.next());
+assertEquals("'"+t1+"'",rs.getString(1));
+assertEquals("'"+"x"+"'",rs.getString(2));
+assertFalse(rs.next());
+
+}
 }
\ No newline at end of file
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
index 024e3cd..bd98bff 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
@@ -211,7 +211,8 @@ public class CSVCommonsLoader {
 long start = System.currentTimeMillis();
 CsvUpsertListener upsertListener = new CsvUpsertListener(conn,
 conn.getMutateBatchSize(), isStrict);
-CsvUpsertExecutor csvUpsertExecutor = new CsvUpsertExecutor(conn, 
tableName,
+CsvUpsertExecutor csvUpsertExecutor = new CsvUpsertExecutor(conn,
+SchemaUtil.getEscapedFullTableName(tableName),
 columnInfoList, upsertListener, arrayElementSeparator);
 
 csvUpsertExecutor.execute(csvParser);



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5506 Psql load fails with lower table name

2019-10-07 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new b0d98d7  PHOENIX-5506 Psql load fails with lower table name
b0d98d7 is described below

commit b0d98d7679de81b27ef6bacf406ac925ebdfe8dc
Author: Ankit Singhal 
AuthorDate: Mon Oct 7 11:17:29 2019 -0700

PHOENIX-5506 Psql load fails with lower table name
---
 .../apache/phoenix/end2end/CSVCommonsLoaderIT.java | 35 ++
 .../org/apache/phoenix/util/CSVCommonsLoader.java  |  3 +-
 2 files changed, 37 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
index 9b5581d..8db7afd 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
@@ -23,7 +23,9 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.StringReader;
+import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -35,6 +37,7 @@ import java.util.Properties;
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.csv.CSVParser;
 import org.apache.commons.csv.CSVRecord;
+import org.apache.commons.io.FileUtils;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.schema.IllegalDataException;
@@ -43,7 +46,9 @@ import org.apache.phoenix.schema.types.PArrayDataType;
 import org.apache.phoenix.util.CSVCommonsLoader;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 
 public class CSVCommonsLoaderIT extends ParallelStatsDisabledIT {
 
@@ -98,6 +103,8 @@ public class CSVCommonsLoaderIT extends 
ParallelStatsDisabledIT {
 + "\n"
 + CSV_VALUES_BAD_ENCAPSULATED_CONTROL_CHARS;
 
+@Rule public TemporaryFolder tempFolder = new TemporaryFolder();
+
 @Test
 public void testCSVCommonsUpsert() throws Exception {
 CSVParser parser = null;
@@ -766,4 +773,32 @@ public class CSVCommonsLoaderIT extends 
ParallelStatsDisabledIT {
 }
 
 }
+
+@Test public void testLowerCaseTable() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+conn.setAutoCommit(true);
+String tableName = generateUniqueName().toLowerCase();
+String t1 = generateUniqueName();
+String t2 = t1 + generateUniqueName();
+String csvFileName = "test.csv";
+conn.createStatement().execute("CREATE TABLE \"" + tableName
++ "\" (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY 
KEY(K1,K2))");
+File tempFile = tempFolder.newFile(csvFileName);
+FileUtils.writeStringToFile(tempFile, "'" + t1 + "','x'");
+try {
+CSVCommonsLoader csvLoader =
+new CSVCommonsLoader(conn.unwrap(PhoenixConnection.class), "" 
+ tableName + "",
+null, false, ',', '"', '\\', null);
+csvLoader.upsert(tempFile.getAbsolutePath());
+} catch (Exception e) {
+fail("Failed with Exception:" + e.getMessage());
+}
+ResultSet rs =
+conn.createStatement().executeQuery("SELECT * FROM \"" + tableName 
+ "\" order by k2");
+assertTrue(rs.next());
+assertEquals("'"+t1+"'",rs.getString(1));
+assertEquals("'"+"x"+"'",rs.getString(2));
+assertFalse(rs.next());
+
+}
 }
\ No newline at end of file
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
index 024e3cd..bd98bff 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
@@ -211,7 +211,8 @@ public class CSVCommonsLoader {
 long start = System.currentTimeMillis();
 CsvUpsertListener upsertListener = new CsvUpsertListener(conn,
 conn.getMutateBatchSize(), isStrict);
-CsvUpsertExecutor csvUpsertExecutor = new CsvUpsertExecutor(conn, 
tableName,
+CsvUpsertExecutor csvUpsertExecutor = new CsvUpsertExecutor(conn,
+SchemaUtil.getEscapedFullTableName(tableName),
 columnInfoList, upsertListener, arrayElementSeparator);
 
 csvUpsertExecutor.execute(csvParser);



[phoenix] branch master updated: PHOENIX-5506 Psql load fails with lower table name

2019-10-07 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 7d0e2f8  PHOENIX-5506 Psql load fails with lower table name
7d0e2f8 is described below

commit 7d0e2f88b6e830a200d074eb167aeb2d2368e6c5
Author: Ankit Singhal 
AuthorDate: Mon Oct 7 11:17:29 2019 -0700

PHOENIX-5506 Psql load fails with lower table name
---
 .../apache/phoenix/end2end/CSVCommonsLoaderIT.java | 35 ++
 .../org/apache/phoenix/util/CSVCommonsLoader.java  |  3 +-
 2 files changed, 37 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
index 9b5581d..8db7afd 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
@@ -23,7 +23,9 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.StringReader;
+import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -35,6 +37,7 @@ import java.util.Properties;
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.csv.CSVParser;
 import org.apache.commons.csv.CSVRecord;
+import org.apache.commons.io.FileUtils;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.schema.IllegalDataException;
@@ -43,7 +46,9 @@ import org.apache.phoenix.schema.types.PArrayDataType;
 import org.apache.phoenix.util.CSVCommonsLoader;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 
 public class CSVCommonsLoaderIT extends ParallelStatsDisabledIT {
 
@@ -98,6 +103,8 @@ public class CSVCommonsLoaderIT extends 
ParallelStatsDisabledIT {
 + "\n"
 + CSV_VALUES_BAD_ENCAPSULATED_CONTROL_CHARS;
 
+@Rule public TemporaryFolder tempFolder = new TemporaryFolder();
+
 @Test
 public void testCSVCommonsUpsert() throws Exception {
 CSVParser parser = null;
@@ -766,4 +773,32 @@ public class CSVCommonsLoaderIT extends 
ParallelStatsDisabledIT {
 }
 
 }
+
+@Test public void testLowerCaseTable() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+conn.setAutoCommit(true);
+String tableName = generateUniqueName().toLowerCase();
+String t1 = generateUniqueName();
+String t2 = t1 + generateUniqueName();
+String csvFileName = "test.csv";
+conn.createStatement().execute("CREATE TABLE \"" + tableName
++ "\" (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY 
KEY(K1,K2))");
+File tempFile = tempFolder.newFile(csvFileName);
+FileUtils.writeStringToFile(tempFile, "'" + t1 + "','x'");
+try {
+CSVCommonsLoader csvLoader =
+new CSVCommonsLoader(conn.unwrap(PhoenixConnection.class), "" 
+ tableName + "",
+null, false, ',', '"', '\\', null);
+csvLoader.upsert(tempFile.getAbsolutePath());
+} catch (Exception e) {
+fail("Failed with Exception:" + e.getMessage());
+}
+ResultSet rs =
+conn.createStatement().executeQuery("SELECT * FROM \"" + tableName 
+ "\" order by k2");
+assertTrue(rs.next());
+assertEquals("'"+t1+"'",rs.getString(1));
+assertEquals("'"+"x"+"'",rs.getString(2));
+assertFalse(rs.next());
+
+}
 }
\ No newline at end of file
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
index 59ed9cf..4ade283 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java
@@ -207,7 +207,8 @@ public class CSVCommonsLoader {
 long start = System.currentTimeMillis();
 CsvUpsertListener upsertListener = new CsvUpsertListener(conn,
 conn.getMutateBatchSize(), isStrict);
-CsvUpsertExecutor csvUpsertExecutor = new CsvUpsertExecutor(conn, 
tableName,
+CsvUpsertExecutor csvUpsertExecutor = new CsvUpsertExecutor(conn,
+SchemaUtil.getEscapedFullTableName(tableName),
 columnInfoList, upsertListener, arrayElementSeparator);
 
 csvUpsertExecutor.execute(csvParser);



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5411 Incorrect result is returned when using sum function with case when statement (Toshihiro Suzuki)

2019-07-26 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 3c5482e  PHOENIX-5411 Incorrect result is returned when using sum 
function with case when statement (Toshihiro Suzuki)
3c5482e is described below

commit 3c5482e91d8b8b11db6bc84ce3d97caa8e77e63f
Author: Ankit Singhal 
AuthorDate: Fri Jul 26 13:30:30 2019 -0700

PHOENIX-5411 Incorrect result is returned when using sum function with case 
when statement (Toshihiro Suzuki)
---
 .../org/apache/phoenix/end2end/SumFunctionIT.java  | 54 ++
 .../phoenix/expression/IsNullExpression.java   | 16 +++
 2 files changed, 70 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SumFunctionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SumFunctionIT.java
new file mode 100644
index 000..b7de30e
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SumFunctionIT.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+
+public class SumFunctionIT extends ParallelStatsDisabledIT {
+@Test
+public void testSumFunctionWithCaseWhenStatement() throws Exception {
+String tableName = generateUniqueName();
+
+try (Connection c = DriverManager.getConnection(getUrl());
+  Statement s = c.createStatement()) {
+s.execute("create table " + tableName + " (id varchar primary key, 
col1 varchar, "
+  + "col2 integer)");
+s.execute("upsert into " + tableName + " values('id1', 'aaa', 2)");
+s.execute("upsert into " + tableName + " values('id2', null, 1)");
+c.commit();
+
+try (ResultSet rs = s.executeQuery(
+  "select sum(case when col1 is null then col2 else 0 end), "
++ "sum(case when col1 is not null then col2 else 0 end) from " 
+ tableName)) {
+
+assertThat(rs.next(), is(true));
+assertThat(rs.getInt(1), is(1));
+assertThat(rs.getInt(2), is(2));
+assertThat(rs.next(), is(false));
+}
+}
+}
+}
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
index d8f6cbe..c2f43cb 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
@@ -128,4 +128,20 @@ public class IsNullExpression extends BaseSingleExpression 
{
 public boolean requiresFinalEvaluation() {
 return super.requiresFinalEvaluation() || !this.isNegate();
 }
+
+@Override
+public boolean equals(Object o) {
+if (!super.equals(o)) {
+return false;
+}
+IsNullExpression that = (IsNullExpression) o;
+return isNegate == that.isNegate;
+}
+
+@Override
+public int hashCode() {
+int result = super.hashCode();
+result = 31 * result + (isNegate ? 1 : 0);
+return result;
+}
 }
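
For context, a minimal sketch of why the equals/hashCode override matters,
assuming (as the fix implies) that aggregate expressions are deduplicated via
hash-based collections; the class below is a stand-in, not the real
IsNullExpression:

    import java.util.HashSet;
    import java.util.Objects;
    import java.util.Set;

    // Only the negate flag distinguishes "col IS NULL" from
    // "col IS NOT NULL" over the same child expression.
    final class IsNullExprSketch {
        final String child;      // stand-in for the wrapped child expression
        final boolean isNegate;  // true for IS NOT NULL

        IsNullExprSketch(String child, boolean isNegate) {
            this.child = child;
            this.isNegate = isNegate;
        }

        @Override
        public boolean equals(Object o) {
            if (!(o instanceof IsNullExprSketch)) return false;
            IsNullExprSketch that = (IsNullExprSketch) o;
            // Without comparing isNegate (the pre-fix behavior), IS NULL and
            // IS NOT NULL over the same child compare equal, so the two SUM
            // aggregates in the test collapse into one.
            return child.equals(that.child) && isNegate == that.isNegate;
        }

        @Override
        public int hashCode() {
            return 31 * Objects.hashCode(child) + (isNegate ? 1 : 0);
        }

        public static void main(String[] args) {
            Set<IsNullExprSketch> aggregates = new HashSet<>();
            aggregates.add(new IsNullExprSketch("COL1", false)); // IS NULL
            aggregates.add(new IsNullExprSketch("COL1", true));  // IS NOT NULL
            // Prints 2 with the fix; ignoring isNegate would leave size 1.
            System.out.println(aggregates.size());
        }
    }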



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5411 Incorrect result is returned when using sum function with case when statement (Toshihiro Suzuki)

2019-07-26 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 3f1dde0  PHOENIX-5411 Incorrect result is returned when using sum 
function with case when statement (Toshihiro Suzuki)
3f1dde0 is described below

commit 3f1dde08e83ee8c717f24172e4b6852cb39f28c9
Author: Ankit Singhal 
AuthorDate: Fri Jul 26 13:30:30 2019 -0700

PHOENIX-5411 Incorrect result is returned when using sum function with case 
when statement (Toshihiro Suzuki)
---
 .../org/apache/phoenix/end2end/SumFunctionIT.java  | 54 ++
 .../phoenix/expression/IsNullExpression.java   | 16 +++
 2 files changed, 70 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SumFunctionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SumFunctionIT.java
new file mode 100644
index 000..b7de30e
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SumFunctionIT.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+
+public class SumFunctionIT extends ParallelStatsDisabledIT {
+@Test
+public void testSumFunctionWithCaseWhenStatement() throws Exception {
+String tableName = generateUniqueName();
+
+try (Connection c = DriverManager.getConnection(getUrl());
+  Statement s = c.createStatement()) {
+s.execute("create table " + tableName + " (id varchar primary key, 
col1 varchar, "
+  + "col2 integer)");
+s.execute("upsert into " + tableName + " values('id1', 'aaa', 2)");
+s.execute("upsert into " + tableName + " values('id2', null, 1)");
+c.commit();
+
+try (ResultSet rs = s.executeQuery(
+  "select sum(case when col1 is null then col2 else 0 end), "
++ "sum(case when col1 is not null then col2 else 0 end) from " 
+ tableName)) {
+
+assertThat(rs.next(), is(true));
+assertThat(rs.getInt(1), is(1));
+assertThat(rs.getInt(2), is(2));
+assertThat(rs.next(), is(false));
+}
+}
+}
+}
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
index d8f6cbe..c2f43cb 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
@@ -128,4 +128,20 @@ public class IsNullExpression extends BaseSingleExpression 
{
 public boolean requiresFinalEvaluation() {
 return super.requiresFinalEvaluation() || !this.isNegate();
 }
+
+@Override
+public boolean equals(Object o) {
+if (!super.equals(o)) {
+return false;
+}
+IsNullExpression that = (IsNullExpression) o;
+return isNegate == that.isNegate;
+}
+
+@Override
+public int hashCode() {
+int result = super.hashCode();
+result = 31 * result + (isNegate ? 1 : 0);
+return result;
+}
 }



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5411 Incorrect result is returned when using sum function with case when statement (Toshihiro Suzuki)

2019-07-26 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new d3ea751  PHOENIX-5411 Incorrect result is returned when using sum 
function with case when statement (Toshihiro Suzuki)
d3ea751 is described below

commit d3ea751e539d9adc4f893b0bdacfc42b4a52
Author: Ankit Singhal 
AuthorDate: Fri Jul 26 13:30:30 2019 -0700

PHOENIX-5411 Incorrect result is returned when using sum function with case 
when statement (Toshihiro Suzuki)
---
 .../org/apache/phoenix/end2end/SumFunctionIT.java  | 54 ++
 .../phoenix/expression/IsNullExpression.java   | 16 +++
 2 files changed, 70 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SumFunctionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SumFunctionIT.java
new file mode 100644
index 000..b7de30e
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SumFunctionIT.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+
+public class SumFunctionIT extends ParallelStatsDisabledIT {
+@Test
+public void testSumFunctionWithCaseWhenStatement() throws Exception {
+String tableName = generateUniqueName();
+
+try (Connection c = DriverManager.getConnection(getUrl());
+  Statement s = c.createStatement()) {
+s.execute("create table " + tableName + " (id varchar primary key, 
col1 varchar, "
+  + "col2 integer)");
+s.execute("upsert into " + tableName + " values('id1', 'aaa', 2)");
+s.execute("upsert into " + tableName + " values('id2', null, 1)");
+c.commit();
+
+try (ResultSet rs = s.executeQuery(
+  "select sum(case when col1 is null then col2 else 0 end), "
++ "sum(case when col1 is not null then col2 else 0 end) from " 
+ tableName)) {
+
+assertThat(rs.next(), is(true));
+assertThat(rs.getInt(1), is(1));
+assertThat(rs.getInt(2), is(2));
+assertThat(rs.next(), is(false));
+}
+}
+}
+}
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
index d8f6cbe..c2f43cb 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
@@ -128,4 +128,20 @@ public class IsNullExpression extends BaseSingleExpression 
{
 public boolean requiresFinalEvaluation() {
 return super.requiresFinalEvaluation() || !this.isNegate();
 }
+
+@Override
+public boolean equals(Object o) {
+if (!super.equals(o)) {
+return false;
+}
+IsNullExpression that = (IsNullExpression) o;
+return isNegate == that.isNegate;
+}
+
+@Override
+public int hashCode() {
+int result = super.hashCode();
+result = 31 * result + (isNegate ? 1 : 0);
+return result;
+}
 }



[phoenix] branch master updated: PHOENIX-5411 Incorrect result is returned when using sum function with case when statement (Toshihiro Suzuki)

2019-07-26 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 665e224  PHOENIX-5411 Incorrect result is returned when using sum 
function with case when statement (Toshihiro Suzuki)
665e224 is described below

commit 665e224e4d9a0e991e38083b983ec38989dfd5e7
Author: Ankit Singhal 
AuthorDate: Fri Jul 26 13:30:30 2019 -0700

PHOENIX-5411 Incorrect result is returned when using sum function with case 
when statement (Toshihiro Suzuki)
---
 .../org/apache/phoenix/end2end/SumFunctionIT.java  | 54 ++
 .../phoenix/expression/IsNullExpression.java   | 16 +++
 2 files changed, 70 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SumFunctionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SumFunctionIT.java
new file mode 100644
index 000..b7de30e
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SumFunctionIT.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+
+public class SumFunctionIT extends ParallelStatsDisabledIT {
+@Test
+public void testSumFunctionWithCaseWhenStatement() throws Exception {
+String tableName = generateUniqueName();
+
+try (Connection c = DriverManager.getConnection(getUrl());
+  Statement s = c.createStatement()) {
+s.execute("create table " + tableName + " (id varchar primary key, 
col1 varchar, "
+  + "col2 integer)");
+s.execute("upsert into " + tableName + " values('id1', 'aaa', 2)");
+s.execute("upsert into " + tableName + " values('id2', null, 1)");
+c.commit();
+
+try (ResultSet rs = s.executeQuery(
+  "select sum(case when col1 is null then col2 else 0 end), "
++ "sum(case when col1 is not null then col2 else 0 end) from " 
+ tableName)) {
+
+assertThat(rs.next(), is(true));
+assertThat(rs.getInt(1), is(1));
+assertThat(rs.getInt(2), is(2));
+assertThat(rs.next(), is(false));
+}
+}
+}
+}
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
index d8f6cbe..c2f43cb 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/IsNullExpression.java
@@ -128,4 +128,20 @@ public class IsNullExpression extends BaseSingleExpression 
{
 public boolean requiresFinalEvaluation() {
 return super.requiresFinalEvaluation() || !this.isNegate();
 }
+
+@Override
+public boolean equals(Object o) {
+if (!super.equals(o)) {
+return false;
+}
+IsNullExpression that = (IsNullExpression) o;
+return isNegate == that.isNegate;
+}
+
+@Override
+public int hashCode() {
+int result = super.hashCode();
+result = 31 * result + (isNegate ? 1 : 0);
+return result;
+}
 }



svn commit: r1862899 - /phoenix/site/source/src/site/markdown/bulk_dataload.md

2019-07-10 Thread ankit
Author: ankit
Date: Thu Jul 11 02:31:14 2019
New Revision: 1862899

URL: http://svn.apache.org/viewvc?rev=1862899&view=rev
Log:
Document: A note on lower case table/schema name for Bulkload (Karthik 
Palanisamy)

Modified:
phoenix/site/source/src/site/markdown/bulk_dataload.md

Modified: phoenix/site/source/src/site/markdown/bulk_dataload.md
URL: 
http://svn.apache.org/viewvc/phoenix/site/source/src/site/markdown/bulk_dataload.md?rev=1862899&r1=1862898&r2=1862899&view=diff
==
--- phoenix/site/source/src/site/markdown/bulk_dataload.md (original)
+++ phoenix/site/source/src/site/markdown/bulk_dataload.md Thu Jul 11 02:31:14 
2019
@@ -126,3 +126,11 @@ Two ways in which you can supply a speci
 2. By entering the separator as Ctrl+v, and then pressing the tab key:
 
 -d '^v<tab>'
+
+ A note on lower case table/schema name
+
+Table names in Phoenix are case insensitive (generally uppercase), but 
sometimes a user may need to map an existing HBase table with a lowercase name 
to a Phoenix table. In this case, double quotes around the table name, i.e. 
"tablename", can be used to preserve case sensitivity. The same was extended to 
the bulkload options, but due to the way the Apache Commons CLI library parses 
command line options (ref CLI-275), the argument must be passed as 
\\\"\\\"tablename\\\"\\\" instead of just "tablename" for CsvBulkLoadTool.
+
+Example:
+
+hadoop jar phoenix-<version>-client.jar 
org.apache.phoenix.mapreduce.CsvBulkLoadTool --table \"\"t\"\" --input 
/data/example.csv
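
For illustration, a minimal sketch of the quote handling the doubled quotes
work around, assuming the Commons CLI value quote-stripping discussed in
CLI-275; the option setup below is hypothetical:

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.DefaultParser;
    import org.apache.commons.cli.Option;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.ParseException;

    public class QuoteStripSketch {
        public static void main(String[] args) throws ParseException {
            Options options = new Options();
            options.addOption(
                    Option.builder("t").longOpt("table").hasArg().build());
            // The shell delivers ""t"" unchanged; Commons CLI then strips one
            // surrounding pair of quotes, leaving "t", which Phoenix treats
            // as a case-sensitive identifier.
            CommandLine cl = new DefaultParser().parse(options,
                    new String[] { "--table", "\"\"t\"\"" });
            System.out.println(cl.getOptionValue("table")); // expected: "t"
        }
    }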




svn commit: r1862898 - in /phoenix/site/publish: ./ language/

2019-07-10 Thread ankit
Author: ankit
Date: Thu Jul 11 01:56:38 2019
New Revision: 1862898

URL: http://svn.apache.org/viewvc?rev=1862898&view=rev
Log:
PHOENIX-5319 Document: A note on lower case table/schema name for Bulkload

Modified:
phoenix/site/publish/Phoenix-in-15-minutes-or-less.html
phoenix/site/publish/array_type.html
phoenix/site/publish/atomic_upsert.html
phoenix/site/publish/building.html
phoenix/site/publish/building_website.html
phoenix/site/publish/bulk_dataload.html
phoenix/site/publish/columnencoding.html
phoenix/site/publish/contributing.html
phoenix/site/publish/cursors.html
phoenix/site/publish/develop.html
phoenix/site/publish/download.html
phoenix/site/publish/dynamic_columns.html
phoenix/site/publish/explainplan.html
phoenix/site/publish/faq.html
phoenix/site/publish/flume.html
phoenix/site/publish/hive_storage_handler.html
phoenix/site/publish/index.html
phoenix/site/publish/installation.html
phoenix/site/publish/issues.html
phoenix/site/publish/joins.html
phoenix/site/publish/kafka.html
phoenix/site/publish/language/datatypes.html
phoenix/site/publish/language/functions.html
phoenix/site/publish/language/index.html
phoenix/site/publish/mailing_list.html
phoenix/site/publish/metrics.html
phoenix/site/publish/multi-tenancy.html
phoenix/site/publish/namspace_mapping.html
phoenix/site/publish/news.html
phoenix/site/publish/paged.html
phoenix/site/publish/performance.html
phoenix/site/publish/pherf.html
phoenix/site/publish/phoenix_mr.html
phoenix/site/publish/phoenix_on_emr.html
phoenix/site/publish/phoenix_orm.html
phoenix/site/publish/phoenix_python.html
phoenix/site/publish/phoenix_spark.html
phoenix/site/publish/phoenixcon.html
phoenix/site/publish/pig_integration.html
phoenix/site/publish/python.html
phoenix/site/publish/recent.html
phoenix/site/publish/release.html
phoenix/site/publish/release_notes.html
phoenix/site/publish/resources.html
phoenix/site/publish/roadmap.html
phoenix/site/publish/rowtimestamp.html
phoenix/site/publish/salted.html
phoenix/site/publish/secondary_indexing.html
phoenix/site/publish/sequences.html
phoenix/site/publish/server.html
phoenix/site/publish/skip_scan.html
phoenix/site/publish/source.html
phoenix/site/publish/subqueries.html
phoenix/site/publish/tablesample.html
phoenix/site/publish/team.html
phoenix/site/publish/tracing.html
phoenix/site/publish/transactions.html
phoenix/site/publish/tuning.html
phoenix/site/publish/tuning_guide.html
phoenix/site/publish/udf.html
phoenix/site/publish/update_statistics.html
phoenix/site/publish/upgrading.html
phoenix/site/publish/views.html
phoenix/site/publish/who_is_using.html

Modified: phoenix/site/publish/Phoenix-in-15-minutes-or-less.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/Phoenix-in-15-minutes-or-less.html?rev=1862898&r1=1862897&r2=1862898&view=diff
==
--- phoenix/site/publish/Phoenix-in-15-minutes-or-less.html (original)
+++ phoenix/site/publish/Phoenix-in-15-minutes-or-less.html Thu Jul 11 01:56:38 
2019
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/array_type.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/array_type.html?rev=1862898&r1=1862897&r2=1862898&view=diff
==
--- phoenix/site/publish/array_type.html (original)
+++ phoenix/site/publish/array_type.html Thu Jul 11 01:56:38 2019
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/atomic_upsert.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/atomic_upsert.html?rev=1862898&r1=1862897&r2=1862898&view=diff
==
--- phoenix/site/publish/atomic_upsert.html (original)
+++ phoenix/site/publish/atomic_upsert.html Thu Jul 11 01:56:38 2019
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/building.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/building.html?rev=1862898&r1=1862897&r2=1862898&view=diff
==
--- phoenix/site/publish/building.html (original)
+++ phoenix/site/publish/building.html Thu Jul 11 01:56:38 2019
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/building_website.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/building_website.html?rev=1862898&r1=1862897&r2=1862898&view=diff
==
--- phoenix/site/publish/building_website.html (original)
+++ phoenix/site/publish/building_website.html Thu Jul 11 01:56:38 2019
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/bulk_datal

[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-3541 Bulk Data Loading - Can't use table name by small letter

2019-07-10 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new 62e7487  PHOENIX-3541 Bulk Data Loading - Can't use table name by 
small letter
62e7487 is described below

commit 62e74879dba7cb77c529f0665a42726c51190696
Author: Karthik Palanisamy 
AuthorDate: Wed Jul 10 18:37:01 2019 -0700

PHOENIX-3541 Bulk Data Loading - Can't use table name by small letter
---
 .../apache/phoenix/end2end/CsvBulkLoadToolIT.java  | 68 ++
 .../phoenix/mapreduce/AbstractBulkLoadTool.java| 43 --
 2 files changed, 105 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
index 699b469..7b73265 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
@@ -530,4 +530,72 @@ public class CsvBulkLoadToolIT extends BaseOwnClusterIT {
 }
 }
 }
+
+@Test
+public void testImportWithUpperCaseSchemaNameAndLowerCaseTableName() 
throws Exception {
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE S.\"t\" (ID INTEGER NOT NULL PRIMARY KEY, 
NAME VARCHAR, " +
+  "T DATE) SPLIT ON (1,2)");
+FileSystem fs = FileSystem.get(getUtility().getConfiguration());
+FSDataOutputStream outputStream = fs.create(new 
Path("/tmp/input1.csv"));
+PrintWriter printWriter = new PrintWriter(outputStream);
+printWriter.println("1,Name 1,1970/01/01");
+printWriter.println("2,Name 2,1970/01/02");
+printWriter.close();
+CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
+csvBulkLoadTool.setConf(new 
Configuration(getUtility().getConfiguration()));
+csvBulkLoadTool.getConf().set(DATE_FORMAT_ATTRIB,"yyyy/MM/dd");
+int exitCode = csvBulkLoadTool.run(new String[] {
+"--input", "/tmp/input1.csv",
+"--table", "\"\"t\"\"",
+"--schema", "S",
+"--zookeeper", zkQuorum});
+assertEquals(0, exitCode);
+ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM S.\"t\" 
ORDER BY id");
+assertTrue(rs.next());
+assertEquals(1, rs.getInt(1));
+assertEquals("Name 1", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-01"), rs.getDate(3));
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+assertEquals("Name 2", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-02"), rs.getDate(3));
+assertFalse(rs.next());
+rs.close();
+stmt.close();
+}
+
+@Test
+public void testImportWithLowerCaseSchemaNameAndUpperCaseTableName() 
throws Exception {
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE \"s\".T (ID INTEGER NOT NULL PRIMARY KEY, 
NAME VARCHAR, " +
+   "T DATE) SPLIT ON (1,2)");
+FileSystem fs = FileSystem.get(getUtility().getConfiguration());
+FSDataOutputStream outputStream = fs.create(new 
Path("/tmp/input1.csv"));
+PrintWriter printWriter = new PrintWriter(outputStream);
+printWriter.println("1,Name 1,1970/01/01");
+printWriter.println("2,Name 2,1970/01/02");
+printWriter.close();
+CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
+csvBulkLoadTool.setConf(new 
Configuration(getUtility().getConfiguration()));
+csvBulkLoadTool.getConf().set(DATE_FORMAT_ATTRIB,"yyyy/MM/dd");
+int exitCode = csvBulkLoadTool.run(new String[] {
+"--input", "/tmp/input1.csv",
+"--table", "T",
+"--schema", "\"\"s\"\"",
+"--zookeeper", zkQuorum});
+assertEquals(0, exitCode);
+ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM \"s\".T 
ORDER BY id");
+assertTrue(rs.next());
+assertEquals(1, rs.getInt(1));
+assertEquals("Name 1", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-01"), rs.getDate(3));
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+assertEquals("Name 2", rs.getString(2));
+assertEqua

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-3541 Bulk Data Loading - Can't use table name by small letter

2019-07-10 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 2ca1480  PHOENIX-3541 Bulk Data Loading - Can't use table name by 
small letter
2ca1480 is described below

commit 2ca148006d37a93341213538e5fd045d8ef020bc
Author: Karthik Palanisamy 
AuthorDate: Wed Jul 10 18:37:01 2019 -0700

PHOENIX-3541 Bulk Data Loading - Can't use table name by small letter
---
 .../apache/phoenix/end2end/CsvBulkLoadToolIT.java  | 68 ++
 .../phoenix/mapreduce/AbstractBulkLoadTool.java| 43 --
 2 files changed, 105 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
index f91956c..b301263 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
@@ -536,4 +536,72 @@ public class CsvBulkLoadToolIT extends BaseOwnClusterIT {
 }
 }
 }
+
+@Test
+public void testImportWithUpperCaseSchemaNameAndLowerCaseTableName() 
throws Exception {
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE S.\"t\" (ID INTEGER NOT NULL PRIMARY KEY, 
NAME VARCHAR, " +
+  "T DATE) SPLIT ON (1,2)");
+FileSystem fs = FileSystem.get(getUtility().getConfiguration());
+FSDataOutputStream outputStream = fs.create(new 
Path("/tmp/input1.csv"));
+PrintWriter printWriter = new PrintWriter(outputStream);
+printWriter.println("1,Name 1,1970/01/01");
+printWriter.println("2,Name 2,1970/01/02");
+printWriter.close();
+CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
+csvBulkLoadTool.setConf(new 
Configuration(getUtility().getConfiguration()));
+csvBulkLoadTool.getConf().set(DATE_FORMAT_ATTRIB,"yyyy/MM/dd");
+int exitCode = csvBulkLoadTool.run(new String[] {
+"--input", "/tmp/input1.csv",
+"--table", "\"\"t\"\"",
+"--schema", "S",
+"--zookeeper", zkQuorum});
+assertEquals(0, exitCode);
+ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM S.\"t\" 
ORDER BY id");
+assertTrue(rs.next());
+assertEquals(1, rs.getInt(1));
+assertEquals("Name 1", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-01"), rs.getDate(3));
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+assertEquals("Name 2", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-02"), rs.getDate(3));
+assertFalse(rs.next());
+rs.close();
+stmt.close();
+}
+
+@Test
+public void testImportWithLowerCaseSchemaNameAndUpperCaseTableName() 
throws Exception {
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE \"s\".T (ID INTEGER NOT NULL PRIMARY KEY, 
NAME VARCHAR, " +
+   "T DATE) SPLIT ON (1,2)");
+FileSystem fs = FileSystem.get(getUtility().getConfiguration());
+FSDataOutputStream outputStream = fs.create(new 
Path("/tmp/input1.csv"));
+PrintWriter printWriter = new PrintWriter(outputStream);
+printWriter.println("1,Name 1,1970/01/01");
+printWriter.println("2,Name 2,1970/01/02");
+printWriter.close();
+CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
+csvBulkLoadTool.setConf(new 
Configuration(getUtility().getConfiguration()));
+csvBulkLoadTool.getConf().set(DATE_FORMAT_ATTRIB,"yyyy/MM/dd");
+int exitCode = csvBulkLoadTool.run(new String[] {
+"--input", "/tmp/input1.csv",
+"--table", "T",
+"--schema", "\"\"s\"\"",
+"--zookeeper", zkQuorum});
+assertEquals(0, exitCode);
+ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM \"s\".T 
ORDER BY id");
+assertTrue(rs.next());
+assertEquals(1, rs.getInt(1));
+assertEquals("Name 1", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-01"), rs.getDate(3));
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+assertEquals("Name 2", rs.getString(2));
+assertEqua

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-3541 Bulk Data Loading - Can't use table name by small letter

2019-07-10 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new d8a6532  PHOENIX-3541 Bulk Data Loading - Can't use table name by 
small letter
d8a6532 is described below

commit d8a6532335e80351da79362c5e0a690b9054c9d3
Author: Karthik Palanisamy 
AuthorDate: Wed Jul 10 18:37:01 2019 -0700

PHOENIX-3541 Bulk Data Loading - Can't use table name by small letter
---
 .../apache/phoenix/end2end/CsvBulkLoadToolIT.java  | 68 ++
 .../phoenix/mapreduce/AbstractBulkLoadTool.java| 43 --
 2 files changed, 105 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
index f91956c..b301263 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
@@ -536,4 +536,72 @@ public class CsvBulkLoadToolIT extends BaseOwnClusterIT {
 }
 }
 }
+
+@Test
+public void testImportWithUpperCaseSchemaNameAndLowerCaseTableName() 
throws Exception {
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE S.\"t\" (ID INTEGER NOT NULL PRIMARY KEY, 
NAME VARCHAR, " +
+  "T DATE) SPLIT ON (1,2)");
+FileSystem fs = FileSystem.get(getUtility().getConfiguration());
+FSDataOutputStream outputStream = fs.create(new 
Path("/tmp/input1.csv"));
+PrintWriter printWriter = new PrintWriter(outputStream);
+printWriter.println("1,Name 1,1970/01/01");
+printWriter.println("2,Name 2,1970/01/02");
+printWriter.close();
+CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
+csvBulkLoadTool.setConf(new 
Configuration(getUtility().getConfiguration()));
+csvBulkLoadTool.getConf().set(DATE_FORMAT_ATTRIB,"yyyy/MM/dd");
+int exitCode = csvBulkLoadTool.run(new String[] {
+"--input", "/tmp/input1.csv",
+"--table", "\"\"t\"\"",
+"--schema", "S",
+"--zookeeper", zkQuorum});
+assertEquals(0, exitCode);
+ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM S.\"t\" 
ORDER BY id");
+assertTrue(rs.next());
+assertEquals(1, rs.getInt(1));
+assertEquals("Name 1", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-01"), rs.getDate(3));
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+assertEquals("Name 2", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-02"), rs.getDate(3));
+assertFalse(rs.next());
+rs.close();
+stmt.close();
+}
+
+@Test
+public void testImportWithLowerCaseSchemaNameAndUpperCaseTableName() 
throws Exception {
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE \"s\".T (ID INTEGER NOT NULL PRIMARY KEY, 
NAME VARCHAR, " +
+   "T DATE) SPLIT ON (1,2)");
+FileSystem fs = FileSystem.get(getUtility().getConfiguration());
+FSDataOutputStream outputStream = fs.create(new 
Path("/tmp/input1.csv"));
+PrintWriter printWriter = new PrintWriter(outputStream);
+printWriter.println("1,Name 1,1970/01/01");
+printWriter.println("2,Name 2,1970/01/02");
+printWriter.close();
+CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
+csvBulkLoadTool.setConf(new 
Configuration(getUtility().getConfiguration()));
+csvBulkLoadTool.getConf().set(DATE_FORMAT_ATTRIB,"yyyy/MM/dd");
+int exitCode = csvBulkLoadTool.run(new String[] {
+"--input", "/tmp/input1.csv",
+"--table", "T",
+"--schema", "\"\"s\"\"",
+"--zookeeper", zkQuorum});
+assertEquals(0, exitCode);
+ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM \"s\".T 
ORDER BY id");
+assertTrue(rs.next());
+assertEquals(1, rs.getInt(1));
+assertEquals("Name 1", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-01"), rs.getDate(3));
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+assertEquals("Name 2", rs.getString(2));
+assertEqua

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-3541 Bulk Data Loading - Can't use table name by small letter

2019-07-10 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 685fb07  PHOENIX-3541 Bulk Data Loading - Can't use table name by 
small letter
685fb07 is described below

commit 685fb0756dc469c7eaaf1085e3409c3046b3e1d2
Author: Karthik Palanisamy 
AuthorDate: Wed Jul 10 18:37:01 2019 -0700

PHOENIX-3541 Bulk Data Loading - Can't use table name by small letter
---
 .../apache/phoenix/end2end/CsvBulkLoadToolIT.java  | 68 ++
 .../phoenix/mapreduce/AbstractBulkLoadTool.java| 43 --
 2 files changed, 105 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
index f91956c..b301263 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
@@ -536,4 +536,72 @@ public class CsvBulkLoadToolIT extends BaseOwnClusterIT {
 }
 }
 }
+
+@Test
+public void testImportWithUpperCaseSchemaNameAndLowerCaseTableName() 
throws Exception {
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE S.\"t\" (ID INTEGER NOT NULL PRIMARY KEY, 
NAME VARCHAR, " +
+  "T DATE) SPLIT ON (1,2)");
+FileSystem fs = FileSystem.get(getUtility().getConfiguration());
+FSDataOutputStream outputStream = fs.create(new 
Path("/tmp/input1.csv"));
+PrintWriter printWriter = new PrintWriter(outputStream);
+printWriter.println("1,Name 1,1970/01/01");
+printWriter.println("2,Name 2,1970/01/02");
+printWriter.close();
+CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
+csvBulkLoadTool.setConf(new 
Configuration(getUtility().getConfiguration()));
+csvBulkLoadTool.getConf().set(DATE_FORMAT_ATTRIB,"yyyy/MM/dd");
+int exitCode = csvBulkLoadTool.run(new String[] {
+"--input", "/tmp/input1.csv",
+"--table", "\"\"t\"\"",
+"--schema", "S",
+"--zookeeper", zkQuorum});
+assertEquals(0, exitCode);
+ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM S.\"t\" 
ORDER BY id");
+assertTrue(rs.next());
+assertEquals(1, rs.getInt(1));
+assertEquals("Name 1", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-01"), rs.getDate(3));
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+assertEquals("Name 2", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-02"), rs.getDate(3));
+assertFalse(rs.next());
+rs.close();
+stmt.close();
+}
+
+@Test
+public void testImportWithLowerCaseSchemaNameAndUpperCaseTableName() 
throws Exception {
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE \"s\".T (ID INTEGER NOT NULL PRIMARY KEY, 
NAME VARCHAR, " +
+   "T DATE) SPLIT ON (1,2)");
+FileSystem fs = FileSystem.get(getUtility().getConfiguration());
+FSDataOutputStream outputStream = fs.create(new 
Path("/tmp/input1.csv"));
+PrintWriter printWriter = new PrintWriter(outputStream);
+printWriter.println("1,Name 1,1970/01/01");
+printWriter.println("2,Name 2,1970/01/02");
+printWriter.close();
+CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
+csvBulkLoadTool.setConf(new 
Configuration(getUtility().getConfiguration()));
+csvBulkLoadTool.getConf().set(DATE_FORMAT_ATTRIB,"yyyy/MM/dd");
+int exitCode = csvBulkLoadTool.run(new String[] {
+"--input", "/tmp/input1.csv",
+"--table", "T",
+"--schema", "\"\"s\"\"",
+"--zookeeper", zkQuorum});
+assertEquals(0, exitCode);
+ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM \"s\".T 
ORDER BY id");
+assertTrue(rs.next());
+assertEquals(1, rs.getInt(1));
+assertEquals("Name 1", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-01"), rs.getDate(3));
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+assertEquals("Name 2", rs.getString(2));
+assertEqua

[phoenix] branch master updated: PHOENIX-3541 Bulk Data Loading - Can't use table name by small letter

2019-07-10 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new bca29e2  PHOENIX-3541 Bulk Data Loading - Can't use table name by 
small letter
bca29e2 is described below

commit bca29e25a7356a35044a98e92ae1451b991ba887
Author: Karthik Palanisamy 
AuthorDate: Wed Jul 10 18:37:01 2019 -0700

PHOENIX-3541 Bulk Data Loading - Can't use table name by small letter
---
 .../apache/phoenix/end2end/CsvBulkLoadToolIT.java  | 68 ++
 .../phoenix/mapreduce/AbstractBulkLoadTool.java| 43 --
 2 files changed, 105 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
index f91956c..b301263 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
@@ -536,4 +536,72 @@ public class CsvBulkLoadToolIT extends BaseOwnClusterIT {
 }
 }
 }
+
+@Test
+public void testImportWithUpperCaseSchemaNameAndLowerCaseTableName() 
throws Exception {
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE S.\"t\" (ID INTEGER NOT NULL PRIMARY KEY, 
NAME VARCHAR, " +
+  "T DATE) SPLIT ON (1,2)");
+FileSystem fs = FileSystem.get(getUtility().getConfiguration());
+FSDataOutputStream outputStream = fs.create(new 
Path("/tmp/input1.csv"));
+PrintWriter printWriter = new PrintWriter(outputStream);
+printWriter.println("1,Name 1,1970/01/01");
+printWriter.println("2,Name 2,1970/01/02");
+printWriter.close();
+CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
+csvBulkLoadTool.setConf(new 
Configuration(getUtility().getConfiguration()));
+csvBulkLoadTool.getConf().set(DATE_FORMAT_ATTRIB,"yyyy/MM/dd");
+int exitCode = csvBulkLoadTool.run(new String[] {
+"--input", "/tmp/input1.csv",
+"--table", "\"\"t\"\"",
+"--schema", "S",
+"--zookeeper", zkQuorum});
+assertEquals(0, exitCode);
+ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM S.\"t\" 
ORDER BY id");
+assertTrue(rs.next());
+assertEquals(1, rs.getInt(1));
+assertEquals("Name 1", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-01"), rs.getDate(3));
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+assertEquals("Name 2", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-02"), rs.getDate(3));
+assertFalse(rs.next());
+rs.close();
+stmt.close();
+}
+
+@Test
+public void testImportWithLowerCaseSchemaNameAndUpperCaseTableName() 
throws Exception {
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE \"s\".T (ID INTEGER NOT NULL PRIMARY KEY, 
NAME VARCHAR, " +
+   "T DATE) SPLIT ON (1,2)");
+FileSystem fs = FileSystem.get(getUtility().getConfiguration());
+FSDataOutputStream outputStream = fs.create(new 
Path("/tmp/input1.csv"));
+PrintWriter printWriter = new PrintWriter(outputStream);
+printWriter.println("1,Name 1,1970/01/01");
+printWriter.println("2,Name 2,1970/01/02");
+printWriter.close();
+CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
+csvBulkLoadTool.setConf(new 
Configuration(getUtility().getConfiguration()));
+csvBulkLoadTool.getConf().set(DATE_FORMAT_ATTRIB,"yyyy/MM/dd");
+int exitCode = csvBulkLoadTool.run(new String[] {
+"--input", "/tmp/input1.csv",
+"--table", "T",
+"--schema", "\"\"s\"\"",
+"--zookeeper", zkQuorum});
+assertEquals(0, exitCode);
+ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM \"s\".T 
ORDER BY id");
+assertTrue(rs.next());
+assertEquals(1, rs.getInt(1));
+assertEquals("Name 1", rs.getString(2));
+assertEquals(DateUtil.parseDate("1970-01-01"), rs.getDate(3));
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+assertEquals("Name 2", rs.getString(2));
+assertEquals(DateUtil.par

[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server(addendum)

2019-03-11 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new 6961355  PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server(addendum)
6961355 is described below

commit 69613556242a6638eb3eccc6c1f8f111b59c3a64
Author: Ankit Singhal 
AuthorDate: Mon Mar 11 21:54:40 2019 -0700

PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server(addendum)
---
 .../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java  | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 96e6b3a..7d1ae2b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1085,7 +1085,12 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, 
false));
 }
 Scan scan = new Scan();
-scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
+if (clientTimeStamp != HConstants.LATEST_TIMESTAMP
+&& clientTimeStamp != HConstants.OLDEST_TIMESTAMP) {
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
+} else {
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
+}
 ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
 scanRanges.initializeScan(scan);
 scan.setFilter(scanRanges.getSkipScanFilter());
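
For context, a minimal sketch of the overflow this addendum guards against;
it assumes only that HConstants.LATEST_TIMESTAMP is Long.MAX_VALUE and
HConstants.OLDEST_TIMESTAMP is 0, as defined by HBase:

    public class TimestampOverflowSketch {
        public static void main(String[] args) {
            long clientTimeStamp = Long.MAX_VALUE; // HConstants.LATEST_TIMESTAMP
            // The unguarded code used clientTimeStamp + 1 as the scan's
            // exclusive upper bound; at LATEST_TIMESTAMP this wraps negative,
            // leaving the time range's max below its min.
            System.out.println(clientTimeStamp + 1); // -9223372036854775808
            // Hence the new branch: pass clientTimeStamp through unchanged for
            // LATEST/OLDEST timestamps, and use clientTimeStamp + 1 otherwise.
        }
    }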



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server(addendum)

2019-03-11 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 9cc3529  PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server(addendum)
9cc3529 is described below

commit 9cc3529ea133b118d8272e16e0fc24301063d73f
Author: Ankit Singhal 
AuthorDate: Mon Mar 11 21:54:40 2019 -0700

PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server(addendum)
---
 .../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java  | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 96e6b3a..7d1ae2b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1085,7 +1085,12 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, 
false));
 }
 Scan scan = new Scan();
-scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
+if (clientTimeStamp != HConstants.LATEST_TIMESTAMP
+&& clientTimeStamp != HConstants.OLDEST_TIMESTAMP) {
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
+} else {
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
+}
 ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
 scanRanges.initializeScan(scan);
 scan.setFilter(scanRanges.getSkipScanFilter());



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server(addendum)

2019-03-11 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new d03d90f  PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server(addendum)
d03d90f is described below

commit d03d90f1a9c53540c713cf09e010dac955f823a0
Author: Ankit Singhal 
AuthorDate: Mon Mar 11 21:54:40 2019 -0700

PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server(addendum)
---
 .../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java  | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 96e6b3a..7d1ae2b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1085,7 +1085,12 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, 
false));
 }
 Scan scan = new Scan();
-scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
+if (clientTimeStamp != HConstants.LATEST_TIMESTAMP
+&& clientTimeStamp != HConstants.OLDEST_TIMESTAMP) {
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
+} else {
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
+}
 ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
 scanRanges.initializeScan(scan);
 scan.setFilter(scanRanges.getSkipScanFilter());



[phoenix] branch master updated: PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server(addendum)

2019-03-11 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new f969444  PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server(addendum)
f969444 is described below

commit f969444c96a060a5619e70e543c6d6fb21b32bed
Author: Ankit Singhal 
AuthorDate: Mon Mar 11 21:54:40 2019 -0700

PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server(addendum)
---
 .../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java  | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 06d36d9..192d004 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1088,7 +1088,12 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements RegionCopr
 keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, 
false));
 }
 Scan scan = new Scan();
-scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
+if (clientTimeStamp != HConstants.LATEST_TIMESTAMP
+&& clientTimeStamp != HConstants.OLDEST_TIMESTAMP) {
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
+} else {
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
+}
 ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
 scanRanges.initializeScan(scan);
 scan.setFilter(scanRanges.getSkipScanFilter());



[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server

2019-03-11 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new 1d310ea  PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server
1d310ea is described below

commit 1d310ea3072d35c8ef32a446014f79f7c69b1bb1
Author: Ankit Singhal 
AuthorDate: Mon Mar 11 17:09:36 2019 -0700

PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server
---
 .../phoenix/end2end/SystemTablePermissionsIT.java  | 43 ++
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  2 +-
 2 files changed, 44 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
index 0788ed7..6da970b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
@@ -17,12 +17,23 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
 import java.util.Collections;
+import java.util.Properties;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.NewerSchemaAlreadyExistsException;
+import org.apache.phoenix.schema.NewerTableAlreadyExistsException;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -84,5 +95,37 @@ public class SystemTablePermissionsIT extends 
BasePermissionsIT {
 
 // Make sure that the unprivileged user can now read the table
 verifyAllowed(readTable(TABLE_NAME), regularUser1);
+//This verification is added to test PHOENIX-5178
+superUser1.runAs(new PrivilegedExceptionAction<Void>() {
+@Override public Void run() throws Exception {
+try {
+if (isNamespaceMapped) {
+grantPermissions(regularUser1.getShortName(),"SYSTEM", 
Action.ADMIN);
+}
+return null;
+} catch (Throwable e) {
+throw new Exception(e);
+}
+
+}
+});
+if(isNamespaceMapped) {
+verifyAllowed(new AccessTestAction() {
+@Override public Object run() throws Exception {
+Properties props = new Properties();
+
props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, 
Boolean.toString(isNamespaceMapped));
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
+//Impersonate meta connection
+try (Connection metaConnection = 
DriverManager.getConnection(getUrl(), props);
+Statement stmt = metaConnection.createStatement()) {
+stmt.executeUpdate("CREATE SCHEMA IF NOT EXISTS 
SYSTEM");
+}catch(NewerSchemaAlreadyExistsException e){
+
+}
+return null;
+}
+}, regularUser1);
+}
 }
+
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 4fbeea0..96e6b3a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1085,7 +1085,7 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, 
false));
 }
 Scan scan = new Scan();
-scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
 ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
 scanRanges.initializeScan(scan);
 scan.setFilter(scanRanges.getSkipScanFilter());
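
The one-character fix works because the upper bound of setTimeRange is
exclusive: with the bound set to clientTimeStamp, a SYSTEM schema row written
at exactly clientTimeStamp fell outside the point lookup, so the MetaData
endpoint never cached it. Distilled from the new integration test, the
reproduction is this connection pattern (url stands in for the test cluster's
JDBC URL):

Properties props = new Properties();
props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
// Pin the connection at the system-table timestamp so it behaves like
// the bootstrap "meta connection" that first creates the SYSTEM schema.
props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
        Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
try (Connection metaConnection = DriverManager.getConnection(url, props);
        Statement stmt = metaConnection.createStatement()) {
    stmt.executeUpdate("CREATE SCHEMA IF NOT EXISTS SYSTEM");
} catch (NewerSchemaAlreadyExistsException e) {
    // Expected once the schema already exists at a newer timestamp.
}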



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server

2019-03-11 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 0cd6912  PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server
0cd6912 is described below

commit 0cd691294372ee3cf6144a3fd4054d5770bf5e2e
Author: Ankit Singhal 
AuthorDate: Mon Mar 11 17:09:36 2019 -0700

PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server
---
 .../phoenix/end2end/SystemTablePermissionsIT.java  | 43 ++
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  2 +-
 2 files changed, 44 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
index 0788ed7..6da970b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
@@ -17,12 +17,23 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
 import java.util.Collections;
+import java.util.Properties;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.NewerSchemaAlreadyExistsException;
+import org.apache.phoenix.schema.NewerTableAlreadyExistsException;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -84,5 +95,37 @@ public class SystemTablePermissionsIT extends 
BasePermissionsIT {
 
 // Make sure that the unprivileged user can now read the table
 verifyAllowed(readTable(TABLE_NAME), regularUser1);
+//This verification is added to test PHOENIX-5178
+superUser1.runAs(new PrivilegedExceptionAction<Void>() {
+@Override public Void run() throws Exception {
+try {
+if (isNamespaceMapped) {
+grantPermissions(regularUser1.getShortName(),"SYSTEM", 
Action.ADMIN);
+}
+return null;
+} catch (Throwable e) {
+throw new Exception(e);
+}
+
+}
+});
+if(isNamespaceMapped) {
+verifyAllowed(new AccessTestAction() {
+@Override public Object run() throws Exception {
+Properties props = new Properties();
+
props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, 
Boolean.toString(isNamespaceMapped));
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
+//Impersonate meta connection
+try (Connection metaConnection = 
DriverManager.getConnection(getUrl(), props);
+Statement stmt = metaConnection.createStatement()) {
+stmt.executeUpdate("CREATE SCHEMA IF NOT EXISTS 
SYSTEM");
+}catch(NewerSchemaAlreadyExistsException e){
+
+}
+return null;
+}
+}, regularUser1);
+}
 }
+
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 4fbeea0..96e6b3a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1085,7 +1085,7 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, 
false));
 }
 Scan scan = new Scan();
-scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
 ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
 scanRanges.initializeScan(scan);
 scan.setFilter(scanRanges.getSkipScanFilter());



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server

2019-03-11 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 28ca281  PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server
28ca281 is described below

commit 28ca281dd9bdce240d96f15c57e818780175b25a
Author: Ankit Singhal 
AuthorDate: Mon Mar 11 17:09:36 2019 -0700

PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server
---
 .../phoenix/end2end/SystemTablePermissionsIT.java  | 43 ++
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  2 +-
 2 files changed, 44 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
index 0788ed7..6da970b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
@@ -17,12 +17,23 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
 import java.util.Collections;
+import java.util.Properties;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.NewerSchemaAlreadyExistsException;
+import org.apache.phoenix.schema.NewerTableAlreadyExistsException;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -84,5 +95,37 @@ public class SystemTablePermissionsIT extends 
BasePermissionsIT {
 
 // Make sure that the unprivileged user can now read the table
 verifyAllowed(readTable(TABLE_NAME), regularUser1);
+//This verification is added to test PHOENIX-5178
+superUser1.runAs(new PrivilegedExceptionAction<Void>() {
+@Override public Void run() throws Exception {
+try {
+if (isNamespaceMapped) {
+grantPermissions(regularUser1.getShortName(),"SYSTEM", 
Action.ADMIN);
+}
+return null;
+} catch (Throwable e) {
+throw new Exception(e);
+}
+
+}
+});
+if(isNamespaceMapped) {
+verifyAllowed(new AccessTestAction() {
+@Override public Object run() throws Exception {
+Properties props = new Properties();
+
props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, 
Boolean.toString(isNamespaceMapped));
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
+//Impersonate meta connection
+try (Connection metaConnection = 
DriverManager.getConnection(getUrl(), props);
+Statement stmt = metaConnection.createStatement()) {
+stmt.executeUpdate("CREATE SCHEMA IF NOT EXISTS 
SYSTEM");
+}catch(NewerSchemaAlreadyExistsException e){
+
+}
+return null;
+}
+}, regularUser1);
+}
 }
+
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 4fbeea0..96e6b3a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1085,7 +1085,7 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, 
false));
 }
 Scan scan = new Scan();
-scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
 ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
 scanRanges.initializeScan(scan);
 scan.setFilter(scanRanges.getSkipScanFilter());



[phoenix] branch master updated: PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server

2019-03-11 Thread ankit
This is an automated email from the ASF dual-hosted git repository.

ankit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 5d66774  PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server
5d66774 is described below

commit 5d66774fe886cdc9cf060276a367559cf908c091
Author: Ankit Singhal 
AuthorDate: Mon Mar 11 17:09:36 2019 -0700

PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server
---
 .../phoenix/end2end/SystemTablePermissionsIT.java  | 43 ++
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  2 +-
 2 files changed, 44 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
index 0788ed7..6da970b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
@@ -17,12 +17,23 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
 import java.util.Collections;
+import java.util.Properties;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.NewerSchemaAlreadyExistsException;
+import org.apache.phoenix.schema.NewerTableAlreadyExistsException;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -84,5 +95,37 @@ public class SystemTablePermissionsIT extends 
BasePermissionsIT {
 
 // Make sure that the unprivileged user can now read the table
 verifyAllowed(readTable(TABLE_NAME), regularUser1);
+//This verification is added to test PHOENIX-5178
+superUser1.runAs(new PrivilegedExceptionAction<Void>() {
+@Override public Void run() throws Exception {
+try {
+if (isNamespaceMapped) {
+grantPermissions(regularUser1.getShortName(),"SYSTEM", 
Action.ADMIN);
+}
+return null;
+} catch (Throwable e) {
+throw new Exception(e);
+}
+
+}
+});
+if(isNamespaceMapped) {
+verifyAllowed(new AccessTestAction() {
+@Override public Object run() throws Exception {
+Properties props = new Properties();
+
props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, 
Boolean.toString(isNamespaceMapped));
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
+//Impersonate meta connection
+try (Connection metaConnection = 
DriverManager.getConnection(getUrl(), props);
+Statement stmt = metaConnection.createStatement()) {
+stmt.executeUpdate("CREATE SCHEMA IF NOT EXISTS 
SYSTEM");
+}catch(NewerSchemaAlreadyExistsException e){
+
+}
+return null;
+}
+}, regularUser1);
+}
 }
+
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 0b95b26..06d36d9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1088,7 +1088,7 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements RegionCopr
 keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, 
false));
 }
 Scan scan = new Scan();
-scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
 ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
 scanRanges.initializeScan(scan);
 scan.setFilter(scanRanges.getSkipScanFilter());



phoenix git commit: PHOENIX-5010 Don't build client guidepost cache when phoenix.stats.collection.enabled is disabled

2018-11-13 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 f6e860dc9 -> 64afc405a


PHOENIX-5010 Don't build client guidepost cache when 
phoenix.stats.collection.enabled is disabled


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/64afc405
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/64afc405
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/64afc405

Branch: refs/heads/4.x-HBase-1.1
Commit: 64afc405a64200780bc0e6ff6d0087591f71f815
Parents: f6e860d
Author: Ankit Singhal 
Authored: Tue Nov 13 11:36:26 2018 -0800
Committer: Ankit Singhal 
Committed: Tue Nov 13 11:43:16 2018 -0800

--
 .../org/apache/phoenix/query/GuidePostsCache.java | 18 +-
 1 file changed, 17 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/64afc405/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
index d27be1b..1d9fa36 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
@@ -16,6 +16,10 @@
  */
 package org.apache.phoenix.query;
 
+import static org.apache.phoenix.query.QueryServices.STATS_COLLECTION_ENABLED;
+import static org.apache.phoenix.query.QueryServices.STATS_ENABLED_ATTRIB;
+import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_STATS_COLLECTION_ENABLED;
+
 import java.io.IOException;
 import java.util.List;
 import java.util.Objects;
@@ -66,6 +70,8 @@ public class GuidePostsCache {
 final long maxTableStatsCacheSize = config.getLong(
 QueryServices.STATS_MAX_CACHE_SIZE,
 QueryServicesOptions.DEFAULT_STATS_MAX_CACHE_SIZE);
+   final boolean isStatsEnabled = 
config.getBoolean(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED)
+   && config.getBoolean(STATS_ENABLED_ATTRIB, 
true);
 cache = CacheBuilder.newBuilder()
 // Expire entries a given amount of time after they were 
written
 .expireAfterWrite(statsUpdateFrequency, TimeUnit.MILLISECONDS)
@@ -80,7 +86,7 @@ public class GuidePostsCache {
 // Log removals at TRACE for debugging
 .removalListener(new PhoenixStatsCacheRemovalListener())
 // Automatically load the cache when entries are missing
-.build(new StatsLoader());
+.build(isStatsEnabled ? new StatsLoader() : new 
EmptyStatsLoader());
 }
 
 /**
@@ -129,6 +135,16 @@ public class GuidePostsCache {
 }
 
 /**
+ * Empty stats loader if stats are disabled
+ */
+   protected class EmptyStatsLoader extends CacheLoader<GuidePostsKey, GuidePostsInfo> {
+   @Override
+   public GuidePostsInfo load(GuidePostsKey statsKey) throws 
Exception {
+   return GuidePostsInfo.NO_GUIDEPOST;
+   }
+   }
+
+/**
  * Returns the underlying cache. Try to use the provided methods instead 
of accessing the cache
  * directly.
  */
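
The shape of the change is easy to see in isolation: the cache is still built
either way, but when stats are disabled it is wired to a loader that always
returns the sentinel, so a cache miss never triggers a stats scan. A minimal,
self-contained sketch of that pattern with Guava (the String key/value types
and class name are illustrative, not Phoenix code):

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

final class ConditionalLoaderSketch {
    // Pick the loader once at construction time, as the patch does.
    static LoadingCache<String, String> build(boolean statsEnabled) {
        CacheLoader<String, String> realLoader = new CacheLoader<String, String>() {
            @Override public String load(String key) {
                return "fetched-" + key; // stand-in for the stats RPC
            }
        };
        CacheLoader<String, String> emptyLoader = new CacheLoader<String, String>() {
            @Override public String load(String key) {
                return ""; // stand-in for GuidePostsInfo.NO_GUIDEPOST
            }
        };
        return CacheBuilder.newBuilder()
                .maximumSize(100)
                .build(statsEnabled ? realLoader : emptyLoader);
    }
}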



phoenix git commit: PHOENIX-5010 Don't build client guidepost cache when phoenix.stats.collection.enabled is disabled

2018-11-13 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 526de5336 -> 798aaeedb


PHOENIX-5010 Don't build client guidepost cache when 
phoenix.stats.collection.enabled is disabled


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/798aaeed
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/798aaeed
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/798aaeed

Branch: refs/heads/4.x-HBase-1.2
Commit: 798aaeedb6ab4a52019bd32443397f8c1dcd08bf
Parents: 526de53
Author: Ankit Singhal 
Authored: Tue Nov 13 11:36:26 2018 -0800
Committer: Ankit Singhal 
Committed: Tue Nov 13 11:41:50 2018 -0800

--
 .../org/apache/phoenix/query/GuidePostsCache.java | 18 +-
 1 file changed, 17 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/798aaeed/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
index d27be1b..1d9fa36 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
@@ -16,6 +16,10 @@
  */
 package org.apache.phoenix.query;
 
+import static org.apache.phoenix.query.QueryServices.STATS_COLLECTION_ENABLED;
+import static org.apache.phoenix.query.QueryServices.STATS_ENABLED_ATTRIB;
+import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_STATS_COLLECTION_ENABLED;
+
 import java.io.IOException;
 import java.util.List;
 import java.util.Objects;
@@ -66,6 +70,8 @@ public class GuidePostsCache {
 final long maxTableStatsCacheSize = config.getLong(
 QueryServices.STATS_MAX_CACHE_SIZE,
 QueryServicesOptions.DEFAULT_STATS_MAX_CACHE_SIZE);
+   final boolean isStatsEnabled = 
config.getBoolean(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED)
+   && config.getBoolean(STATS_ENABLED_ATTRIB, 
true);
 cache = CacheBuilder.newBuilder()
 // Expire entries a given amount of time after they were 
written
 .expireAfterWrite(statsUpdateFrequency, TimeUnit.MILLISECONDS)
@@ -80,7 +86,7 @@ public class GuidePostsCache {
 // Log removals at TRACE for debugging
 .removalListener(new PhoenixStatsCacheRemovalListener())
 // Automatically load the cache when entries are missing
-.build(new StatsLoader());
+.build(isStatsEnabled ? new StatsLoader() : new 
EmptyStatsLoader());
 }
 
 /**
@@ -129,6 +135,16 @@ public class GuidePostsCache {
 }
 
 /**
+ * Empty stats loader if stats are disabled
+ */
+   protected class EmptyStatsLoader extends CacheLoader<GuidePostsKey, GuidePostsInfo> {
+   @Override
+   public GuidePostsInfo load(GuidePostsKey statsKey) throws 
Exception {
+   return GuidePostsInfo.NO_GUIDEPOST;
+   }
+   }
+
+/**
  * Returns the underlying cache. Try to use the provided methods instead 
of accessing the cache
  * directly.
  */



phoenix git commit: PHOENIX-5010 Don't build client guidepost cache when phoenix.stats.collection.enabled is disabled

2018-11-13 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 169270daa -> 8cd7898a1


PHOENIX-5010 Don't build client guidepost cache when 
phoenix.stats.collection.enabled is disabled


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8cd7898a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8cd7898a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8cd7898a

Branch: refs/heads/4.x-HBase-1.3
Commit: 8cd7898a1acf1368a05a2e893253cecc53416080
Parents: 169270d
Author: Ankit Singhal 
Authored: Tue Nov 13 11:39:58 2018 -0800
Committer: Ankit Singhal 
Committed: Tue Nov 13 11:39:58 2018 -0800

--
 .../org/apache/phoenix/query/GuidePostsCache.java | 18 +-
 1 file changed, 17 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8cd7898a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
index d27be1b..1d9fa36 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
@@ -16,6 +16,10 @@
  */
 package org.apache.phoenix.query;
 
+import static org.apache.phoenix.query.QueryServices.STATS_COLLECTION_ENABLED;
+import static org.apache.phoenix.query.QueryServices.STATS_ENABLED_ATTRIB;
+import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_STATS_COLLECTION_ENABLED;
+
 import java.io.IOException;
 import java.util.List;
 import java.util.Objects;
@@ -66,6 +70,8 @@ public class GuidePostsCache {
 final long maxTableStatsCacheSize = config.getLong(
 QueryServices.STATS_MAX_CACHE_SIZE,
 QueryServicesOptions.DEFAULT_STATS_MAX_CACHE_SIZE);
+   final boolean isStatsEnabled = 
config.getBoolean(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED)
+   && config.getBoolean(STATS_ENABLED_ATTRIB, 
true);
 cache = CacheBuilder.newBuilder()
 // Expire entries a given amount of time after they were 
written
 .expireAfterWrite(statsUpdateFrequency, TimeUnit.MILLISECONDS)
@@ -80,7 +86,7 @@ public class GuidePostsCache {
 // Log removals at TRACE for debugging
 .removalListener(new PhoenixStatsCacheRemovalListener())
 // Automatically load the cache when entries are missing
-.build(new StatsLoader());
+.build(isStatsEnabled ? new StatsLoader() : new 
EmptyStatsLoader());
 }
 
 /**
@@ -129,6 +135,16 @@ public class GuidePostsCache {
 }
 
 /**
+ * Empty stats loader if stats are disabled
+ */
+   protected class EmptyStatsLoader extends CacheLoader<GuidePostsKey, GuidePostsInfo> {
+   @Override
+   public GuidePostsInfo load(GuidePostsKey statsKey) throws 
Exception {
+   return GuidePostsInfo.NO_GUIDEPOST;
+   }
+   }
+
+/**
  * Returns the underlying cache. Try to use the provided methods instead 
of accessing the cache
  * directly.
  */



phoenix git commit: PHOENIX-5010 Don't build client guidepost cache when phoenix.stats.collection.enabled is disabled

2018-11-13 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 70a699e30 -> ce89c2c1d


PHOENIX-5010 Don't build client guidepost cache when 
phoenix.stats.collection.enabled is disabled


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ce89c2c1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ce89c2c1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ce89c2c1

Branch: refs/heads/4.x-HBase-1.4
Commit: ce89c2c1da6e9d81570b18d7b252b0cc24afbcd8
Parents: 70a699e
Author: Ankit Singhal 
Authored: Tue Nov 13 11:36:26 2018 -0800
Committer: Ankit Singhal 
Committed: Tue Nov 13 11:37:35 2018 -0800

--
 .../org/apache/phoenix/query/GuidePostsCache.java | 18 +-
 1 file changed, 17 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ce89c2c1/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
index d27be1b..1d9fa36 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
@@ -16,6 +16,10 @@
  */
 package org.apache.phoenix.query;
 
+import static org.apache.phoenix.query.QueryServices.STATS_COLLECTION_ENABLED;
+import static org.apache.phoenix.query.QueryServices.STATS_ENABLED_ATTRIB;
+import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_STATS_COLLECTION_ENABLED;
+
 import java.io.IOException;
 import java.util.List;
 import java.util.Objects;
@@ -66,6 +70,8 @@ public class GuidePostsCache {
 final long maxTableStatsCacheSize = config.getLong(
 QueryServices.STATS_MAX_CACHE_SIZE,
 QueryServicesOptions.DEFAULT_STATS_MAX_CACHE_SIZE);
+   final boolean isStatsEnabled = 
config.getBoolean(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED)
+   && config.getBoolean(STATS_ENABLED_ATTRIB, 
true);
 cache = CacheBuilder.newBuilder()
 // Expire entries a given amount of time after they were 
written
 .expireAfterWrite(statsUpdateFrequency, TimeUnit.MILLISECONDS)
@@ -80,7 +86,7 @@ public class GuidePostsCache {
 // Log removals at TRACE for debugging
 .removalListener(new PhoenixStatsCacheRemovalListener())
 // Automatically load the cache when entries are missing
-.build(new StatsLoader());
+.build(isStatsEnabled ? new StatsLoader() : new 
EmptyStatsLoader());
 }
 
 /**
@@ -129,6 +135,16 @@ public class GuidePostsCache {
 }
 
 /**
+ * Empty stats loader if stats are disabled
+ */
+   protected class EmptyStatsLoader extends CacheLoader<GuidePostsKey, GuidePostsInfo> {
+   @Override
+   public GuidePostsInfo load(GuidePostsKey statsKey) throws 
Exception {
+   return GuidePostsInfo.NO_GUIDEPOST;
+   }
+   }
+
+/**
  * Returns the underlying cache. Try to use the provided methods instead 
of accessing the cache
  * directly.
  */



phoenix git commit: PHOENIX-5010 Don't build client guidepost cache when phoenix.stats.collection.enabled is disabled

2018-11-13 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/master d9d8fd0c4 -> 4b4466f9b


PHOENIX-5010 Don't build client guidepost cache when 
phoenix.stats.collection.enabled is disabled


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4b4466f9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4b4466f9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4b4466f9

Branch: refs/heads/master
Commit: 4b4466f9bfdddac7c3e9c70b213da1a42ed2d93e
Parents: d9d8fd0
Author: Ankit Singhal 
Authored: Tue Nov 13 11:36:26 2018 -0800
Committer: Ankit Singhal 
Committed: Tue Nov 13 11:36:26 2018 -0800

--
 .../org/apache/phoenix/query/GuidePostsCache.java | 18 +-
 1 file changed, 17 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b4466f9/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
index 7c57122..b78879b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
@@ -16,6 +16,10 @@
  */
 package org.apache.phoenix.query;
 
+import static org.apache.phoenix.query.QueryServices.STATS_COLLECTION_ENABLED;
+import static org.apache.phoenix.query.QueryServices.STATS_ENABLED_ATTRIB;
+import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_STATS_COLLECTION_ENABLED;
+
 import java.io.IOException;
 import java.util.List;
 import java.util.Objects;
@@ -66,6 +70,8 @@ public class GuidePostsCache {
 final long maxTableStatsCacheSize = config.getLong(
 QueryServices.STATS_MAX_CACHE_SIZE,
 QueryServicesOptions.DEFAULT_STATS_MAX_CACHE_SIZE);
+   final boolean isStatsEnabled = 
config.getBoolean(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED)
+   && config.getBoolean(STATS_ENABLED_ATTRIB, 
true);
 cache = CacheBuilder.newBuilder()
 // Expire entries a given amount of time after they were 
written
 .expireAfterWrite(statsUpdateFrequency, TimeUnit.MILLISECONDS)
@@ -80,7 +86,7 @@ public class GuidePostsCache {
 // Log removals at TRACE for debugging
 .removalListener(new PhoenixStatsCacheRemovalListener())
 // Automatically load the cache when entries are missing
-.build(new StatsLoader());
+.build(isStatsEnabled ? new StatsLoader() : new 
EmptyStatsLoader());
 }
 
 /**
@@ -129,6 +135,16 @@ public class GuidePostsCache {
 }
 
 /**
+ * Empty stats loader if stats are disabled
+ */
+   protected class EmptyStatsLoader extends CacheLoader<GuidePostsKey, GuidePostsInfo> {
+   @Override
+   public GuidePostsInfo load(GuidePostsKey statsKey) throws 
Exception {
+   return GuidePostsInfo.NO_GUIDEPOST;
+   }
+   }
+
+/**
  * Returns the underlying cache. Try to use the provided methods instead 
of accessing the cache
  * directly.
  */



phoenix git commit: Revert "PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64"

2018-10-05 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 ec3542d6f -> 2e402071d


Revert "PHOENIX-4825 Replace usage of HBase Base64 implementation with 
java.util.Base64"

This reverts commit 22934e5af7af79580bf54feeb7667eccafaafc71 in order to 
support JDK 1.7 for 4.x releases.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2e402071
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2e402071
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2e402071

Branch: refs/heads/4.x-HBase-1.2
Commit: 2e402071d06894df33bc419e9936926f1fb749b0
Parents: ec3542d
Author: Ankit Singhal 
Authored: Fri Oct 5 16:53:31 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Oct 5 16:53:31 2018 -0700

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java |  7 ++---
 .../phoenix/mapreduce/CsvBulkImportUtil.java|  8 ++
 .../util/PhoenixConfigurationUtil.java  |  7 ++---
 .../apache/phoenix/schema/types/PVarbinary.java |  4 +--
 .../phoenix/util/csv/CsvUpsertExecutor.java |  4 +--
 .../phoenix/util/json/JsonUpsertExecutor.java   |  4 +--
 .../util/AbstractUpsertExecutorTest.java| 12 
 .../util/TenantIdByteConversionTest.java| 30 
 8 files changed, 26 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2e402071/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 528fe7f..04272fa 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -31,13 +31,12 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
-import java.util.Base64;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Base64;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
@@ -279,7 +278,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 values[i] = rs.getObject(i + 1);
 }
 conn = getTenantSpecificConnection(tenantId);
-
pkIds.add(Bytes.toString(Base64.getEncoder().encode(PhoenixRuntime.encodeColumnValues(conn,
 tableOrViewName.toUpperCase(), values, columns;
+
pkIds.add(Base64.encodeBytes(PhoenixRuntime.encodeColumnValues(conn, 
tableOrViewName.toUpperCase(), values, columns)));
 }
 return pkIds.toArray(new String[pkIds.size()]);
 }
@@ -297,7 +296,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 PreparedStatement stmt = conn.prepareStatement(query);
 int bindCounter = 1;
 for (int i = 0; i < cursorIds.length; i++) {
-Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.getDecoder().decode(cursorIds[i]), columns);
+Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.decode(cursorIds[i]), columns);
 for (int j = 0; j < pkParts.length; j++) {
 stmt.setObject(bindCounter++, pkParts[j]);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2e402071/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
index bf5a538..ff9ff72 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
@@ -17,11 +17,9 @@
  */
 package org.apache.phoenix.mapreduce;
 
-import java.util.Base64;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Base64;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
@@ -70,7 +68,7 @@ public class CsvBulkImportUtil {
 
 @VisibleForTesting
 static void setChar(Configuration conf, String confKey, char charValue) {
-conf.set(confKey, 
Bytes.toString(Base64.getEncoder().encode
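
The revert is mechanical: every java.util.Base64 call site goes back to the
HBase 1.x utility class, which predates JDK 8. The correspondence between the
two APIs, as a small runnable sketch:

public class Base64Compat {
    public static void main(String[] args) {
        byte[] bytes = "phoenix".getBytes();
        // JDK 8+ API that PHOENIX-4825 had introduced:
        String jdk8 = java.util.Base64.getEncoder().encodeToString(bytes);
        byte[] back8 = java.util.Base64.getDecoder().decode(jdk8);
        // JDK 1.7-compatible HBase 1.x utility that this revert restores:
        String hb = org.apache.hadoop.hbase.util.Base64.encodeBytes(bytes);
        byte[] backHb = org.apache.hadoop.hbase.util.Base64.decode(hb);
        // Both produce standard Base64, so the encodings interoperate.
        System.out.println(jdk8.equals(hb)
                && new String(back8).equals(new String(backHb)));
    }
}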

phoenix git commit: Revert "PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64"

2018-10-05 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 e62be9c82 -> 8c76e7c97


Revert "PHOENIX-4825 Replace usage of HBase Base64 implementation with 
java.util.Base64"

This reverts commit 22934e5af7af79580bf54feeb7667eccafaafc71 in order to 
support JDK 1.7 for 4.x releases.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8c76e7c9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8c76e7c9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8c76e7c9

Branch: refs/heads/4.x-HBase-1.3
Commit: 8c76e7c9775f8695a513168ac3ba3db467d54482
Parents: e62be9c
Author: Ankit Singhal 
Authored: Fri Oct 5 16:51:37 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Oct 5 16:51:37 2018 -0700

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java |  7 ++---
 .../phoenix/mapreduce/CsvBulkImportUtil.java|  8 ++
 .../util/PhoenixConfigurationUtil.java  |  7 ++---
 .../apache/phoenix/schema/types/PVarbinary.java |  4 +--
 .../phoenix/util/csv/CsvUpsertExecutor.java |  4 +--
 .../phoenix/util/json/JsonUpsertExecutor.java   |  4 +--
 .../util/AbstractUpsertExecutorTest.java| 12 
 .../util/TenantIdByteConversionTest.java| 30 
 8 files changed, 26 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c76e7c9/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 528fe7f..04272fa 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -31,13 +31,12 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
-import java.util.Base64;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Base64;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
@@ -279,7 +278,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 values[i] = rs.getObject(i + 1);
 }
 conn = getTenantSpecificConnection(tenantId);
-
pkIds.add(Bytes.toString(Base64.getEncoder().encode(PhoenixRuntime.encodeColumnValues(conn,
 tableOrViewName.toUpperCase(), values, columns;
+
pkIds.add(Base64.encodeBytes(PhoenixRuntime.encodeColumnValues(conn, 
tableOrViewName.toUpperCase(), values, columns)));
 }
 return pkIds.toArray(new String[pkIds.size()]);
 }
@@ -297,7 +296,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 PreparedStatement stmt = conn.prepareStatement(query);
 int bindCounter = 1;
 for (int i = 0; i < cursorIds.length; i++) {
-Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.getDecoder().decode(cursorIds[i]), columns);
+Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.decode(cursorIds[i]), columns);
 for (int j = 0; j < pkParts.length; j++) {
 stmt.setObject(bindCounter++, pkParts[j]);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8c76e7c9/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
index bf5a538..ff9ff72 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
@@ -17,11 +17,9 @@
  */
 package org.apache.phoenix.mapreduce;
 
-import java.util.Base64;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Base64;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
@@ -70,7 +68,7 @@ public class CsvBulkImportUtil {
 
 @VisibleForTesting
 static void setChar(Configuration conf, String confKey, char charValue) {
-conf.set(confKey, 
Bytes.toString(Base64.getEncoder().encode

phoenix git commit: Revert "PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64"

2018-10-05 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 a00949914 -> 17f00f1b8


Revert "PHOENIX-4825 Replace usage of HBase Base64 implementation with 
java.util.Base64"

This reverts commit 22934e5af7af79580bf54feeb7667eccafaafc71 in order to 
support JDK 1.7 for 4.x releases.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/17f00f1b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/17f00f1b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/17f00f1b

Branch: refs/heads/4.x-HBase-1.4
Commit: 17f00f1b89a88d8632ab2f4aac1608d0b2d0d209
Parents: a009499
Author: Ankit Singhal 
Authored: Fri Oct 5 16:49:56 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Oct 5 16:49:56 2018 -0700

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java |  7 ++---
 .../phoenix/mapreduce/CsvBulkImportUtil.java|  8 ++
 .../util/PhoenixConfigurationUtil.java  |  7 ++---
 .../apache/phoenix/schema/types/PVarbinary.java |  4 +--
 .../phoenix/util/csv/CsvUpsertExecutor.java |  4 +--
 .../phoenix/util/json/JsonUpsertExecutor.java   |  4 +--
 .../util/AbstractUpsertExecutorTest.java| 12 
 .../util/TenantIdByteConversionTest.java| 30 
 8 files changed, 26 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/17f00f1b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 528fe7f..04272fa 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -31,13 +31,12 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
-import java.util.Base64;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Base64;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
@@ -279,7 +278,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 values[i] = rs.getObject(i + 1);
 }
 conn = getTenantSpecificConnection(tenantId);
-
pkIds.add(Bytes.toString(Base64.getEncoder().encode(PhoenixRuntime.encodeColumnValues(conn,
 tableOrViewName.toUpperCase(), values, columns;
+
pkIds.add(Base64.encodeBytes(PhoenixRuntime.encodeColumnValues(conn, 
tableOrViewName.toUpperCase(), values, columns)));
 }
 return pkIds.toArray(new String[pkIds.size()]);
 }
@@ -297,7 +296,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 PreparedStatement stmt = conn.prepareStatement(query);
 int bindCounter = 1;
 for (int i = 0; i < cursorIds.length; i++) {
-Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.getDecoder().decode(cursorIds[i]), columns);
+Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.decode(cursorIds[i]), columns);
 for (int j = 0; j < pkParts.length; j++) {
 stmt.setObject(bindCounter++, pkParts[j]);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/17f00f1b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
index bf5a538..ff9ff72 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
@@ -17,11 +17,9 @@
  */
 package org.apache.phoenix.mapreduce;
 
-import java.util.Base64;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Base64;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
@@ -70,7 +68,7 @@ public class CsvBulkImportUtil {
 
 @VisibleForTesting
 static void setChar(Configuration conf, String confKey, char charValue) {
-conf.set(confKey, 
Bytes.toString(Base64.getEncoder().encode

phoenix git commit: PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for SYSTEM.MUTEX table(addendum)

2018-10-02 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/master 61250b5ca -> ecb6bc995


PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for 
SYSTEM.MUTEX table(addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ecb6bc99
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ecb6bc99
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ecb6bc99

Branch: refs/heads/master
Commit: ecb6bc995c478f2d58df093b48421564449c38b2
Parents: 61250b5
Author: Ankit Singhal 
Authored: Tue Oct 2 12:30:09 2018 -0700
Committer: Ankit Singhal 
Committed: Tue Oct 2 12:30:09 2018 -0700

--
 .../phoenix/query/ConnectionQueryServicesImpl.java   | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecb6bc99/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index daecf1f..f1e3d82 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2601,12 +2601,15 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean foundAccessDeniedException = false;
 // when running spark/map reduce jobs the 
ADE might be wrapped
 // in a RemoteException
-if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(AccessDeniedException.class))) {
+if (inspectIfAnyExceptionInChain(e, Collections
+.<Class<? extends Exception>> singletonList(AccessDeniedException.class))) {
 // Pass
 logger.warn("Could not check for 
Phoenix SYSTEM tables, assuming they exist and are properly configured");
 
checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES,
 getProps()).getName());
 success = true;
-} else if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(NamespaceNotFoundException.class))) {
+} else if (inspectIfAnyExceptionInChain(e,
+Collections.<Class<? extends Exception>> singletonList(
+NamespaceNotFoundException.class))) {
 // This exception is only possible if 
SYSTEM namespace mapping is enabled and SYSTEM namespace is missing
 // It implies that SYSTEM tables are 
not created and hence we shouldn't provide a connection
 AccessDeniedException ade = new 
AccessDeniedException("Insufficient permissions to create SYSTEM namespace and 
SYSTEM Tables");
@@ -2705,7 +2708,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 admin.createTable(tableDesc);
 }
 catch (IOException e) {
-if (inspectIfAnyExceptionInChain(e, Arrays.<Class<? extends IOException>> asList(
+if (inspectIfAnyExceptionInChain(e, Arrays.<Class<? extends Exception>> asList(
 AccessDeniedException.class, 
org.apache.hadoop.hbase.TableExistsException.class))) {
 // Ignore TableExistsException as another client might beat us 
during upgrade.
 // Ignore AccessDeniedException, as it may be possible 
underpriviliged user trying to use the connection
@@ -2718,10 +2721,10 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 }
 }
 
-private boolean inspectIfAnyExceptionInChain(Throwable io, List<Class<? extends IOException>> ioList) {
+private boolean inspectIfAnyExceptionInChain(Throwable io, List<Class<? extends Exception>> ioList) {
 boolean exceptionToIgnore = false;
 for (Throwable t : Throwables.getCausalChain(io)) {
-for (Class<? extends IOException> exception : ioList) {
+for (Class<? extends Exception> exception : ioList) {
 exceptionToIgnore |= isExceptionInstanceOf(t, exception);
 }
 if (exceptionToIgnore) {
@@ -2732,7 +2735,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 return exceptionToIgnore;
 }
 
-private boolean isExceptionInstanceOf(Throwab
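
The helper being patched walks the full causal chain because spark and
map-reduce jobs surface the interesting exception wrapped in a
RemoteException. A minimal standalone version of that inspection, using the
same Guava primitive (the class and method names here are illustrative, not
the Phoenix signatures):

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import com.google.common.base.Throwables;

final class CausalChainSketch {
    // Check every cause of a (possibly wrapped) exception against a list
    // of exception classes, as inspectIfAnyExceptionInChain does.
    static boolean anyInChain(Throwable t, List<Class<? extends Exception>> wanted) {
        for (Throwable cause : Throwables.getCausalChain(t)) {
            for (Class<? extends Exception> clazz : wanted) {
                if (clazz.isInstance(cause)) {
                    return true;
                }
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Exception wrapped = new RuntimeException(new IOException("remote"));
        System.out.println(anyInChain(wrapped,
                Arrays.<Class<? extends Exception>>asList(IOException.class))); // true
    }
}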

phoenix git commit: PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for SYSTEM.MUTEX table(addendum)

2018-10-02 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 5635204c6 -> be54de417


PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for 
SYSTEM.MUTEX table(addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/be54de41
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/be54de41
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/be54de41

Branch: refs/heads/4.x-HBase-1.2
Commit: be54de4170abb978c75ca9ec22d54355ab23abc5
Parents: 5635204
Author: Ankit Singhal 
Authored: Tue Oct 2 12:29:04 2018 -0700
Committer: Ankit Singhal 
Committed: Tue Oct 2 12:29:04 2018 -0700

--
 .../phoenix/query/ConnectionQueryServicesImpl.java   | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/be54de41/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index d2ece24..dbfd461 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2568,12 +2568,15 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean foundAccessDeniedException = false;
 // when running spark/map reduce jobs the 
ADE might be wrapped
 // in a RemoteException
-if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(AccessDeniedException.class))) {
+if (inspectIfAnyExceptionInChain(e, Collections
+.<Class<? extends Exception>> singletonList(AccessDeniedException.class))) {
 // Pass
 logger.warn("Could not check for 
Phoenix SYSTEM tables, assuming they exist and are properly configured");
 
checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES,
 getProps()).getName());
 success = true;
-} else if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(NamespaceNotFoundException.class))) {
+} else if (inspectIfAnyExceptionInChain(e,
+Collections.<Class<? extends Exception>> singletonList(
+NamespaceNotFoundException.class))) {
 // This exception is only possible if 
SYSTEM namespace mapping is enabled and SYSTEM namespace is missing
 // It implies that SYSTEM tables are 
not created and hence we shouldn't provide a connection
 AccessDeniedException ade = new 
AccessDeniedException("Insufficient permissions to create SYSTEM namespace and 
SYSTEM Tables");
@@ -2670,7 +2673,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 admin.createTable(tableDesc);
 }
 catch (IOException e) {
-if (inspectIfAnyExceptionInChain(e, Arrays.<Class<? extends IOException>> asList(
+if (inspectIfAnyExceptionInChain(e, Arrays.<Class<? extends Exception>> asList(
 AccessDeniedException.class, 
org.apache.hadoop.hbase.TableExistsException.class))) {
 // Ignore TableExistsException as another client might beat us 
during upgrade.
 // Ignore AccessDeniedException, as it may be possible 
underpriviliged user trying to use the connection
@@ -2683,10 +2686,10 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 }
 }
 
-private boolean inspectIfAnyExceptionInChain(Throwable io, List<Class<? extends IOException>> ioList) {
+private boolean inspectIfAnyExceptionInChain(Throwable io, List<Class<? extends Exception>> ioList) {
 boolean exceptionToIgnore = false;
 for (Throwable t : Throwables.getCausalChain(io)) {
-for (Class<? extends IOException> exception : ioList) {
+for (Class<? extends Exception> exception : ioList) {
 exceptionToIgnore |= isExceptionInstanceOf(t, exception);
 }
 if (exceptionToIgnore) {
@@ -2697,7 +2700,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 return exceptionToIgnore;
 }
 
-private boolean isExc
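
One likely reason the explicit type witnesses appear in this addendum: without
a target type, javac 7 infers Collections.singletonList(AccessDeniedException.class)
as List<Class<AccessDeniedException>>, which is not assignable to an invariant
List<Class<? extends Exception>> parameter. JDK 8's target typing resolves this
automatically, but the 4.x branches still build with JDK 1.7. A compilable
illustration of the difference (names here are illustrative):

import java.util.Collections;
import java.util.List;

final class TypeWitnessSketch {
    // A consumer that, like inspectIfAnyExceptionInChain, takes an
    // invariant List of bounded Class tokens.
    static boolean matches(Throwable t, List<Class<? extends Exception>> classes) {
        for (Class<? extends Exception> clazz : classes) {
            if (clazz.isInstance(t)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        // javac 7 infers List<Class<RuntimeException>> for the bare call,
        // which does not convert to List<Class<? extends Exception>>;
        // the explicit witness forces the wider element type.
        boolean hit = matches(new RuntimeException(),
                Collections.<Class<? extends Exception>> singletonList(RuntimeException.class));
        System.out.println(hit); // true
    }
}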

phoenix git commit: PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for SYSTEM.MUTEX table(addendum)

2018-10-02 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 d6277078f -> 17c2681ca


PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for 
SYSTEM.MUTEX table(addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/17c2681c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/17c2681c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/17c2681c

Branch: refs/heads/4.x-HBase-1.4
Commit: 17c2681caccd26442ce9b68dcb858314083b0408
Parents: d627707
Author: Ankit Singhal 
Authored: Tue Oct 2 12:28:48 2018 -0700
Committer: Ankit Singhal 
Committed: Tue Oct 2 12:28:48 2018 -0700

--
 .../phoenix/query/ConnectionQueryServicesImpl.java   | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/17c2681c/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index d2ece24..dbfd461 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2568,12 +2568,15 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean foundAccessDeniedException = false;
 // when running spark/map reduce jobs the 
ADE might be wrapped
 // in a RemoteException
-if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(AccessDeniedException.class))) {
+if (inspectIfAnyExceptionInChain(e, Collections
+.<Class<? extends Exception>> singletonList(AccessDeniedException.class))) {
 // Pass
 logger.warn("Could not check for 
Phoenix SYSTEM tables, assuming they exist and are properly configured");
 
checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES,
 getProps()).getName());
 success = true;
-} else if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(NamespaceNotFoundException.class))) {
+} else if (inspectIfAnyExceptionInChain(e,
+Collections.<Class<? extends Exception>> singletonList(
+NamespaceNotFoundException.class))) {
 // This exception is only possible if 
SYSTEM namespace mapping is enabled and SYSTEM namespace is missing
 // It implies that SYSTEM tables are 
not created and hence we shouldn't provide a connection
 AccessDeniedException ade = new 
AccessDeniedException("Insufficient permissions to create SYSTEM namespace and 
SYSTEM Tables");
@@ -2670,7 +2673,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 admin.createTable(tableDesc);
 }
 catch (IOException e) {
-if (inspectIfAnyExceptionInChain(e, Arrays.<Class<? extends IOException>> asList(
+if (inspectIfAnyExceptionInChain(e, Arrays.<Class<? extends Exception>> asList(
 AccessDeniedException.class, 
org.apache.hadoop.hbase.TableExistsException.class))) {
 // Ignore TableExistsException as another client might beat us 
during upgrade.
 // Ignore AccessDeniedException, as it may be an 
underprivileged user trying to use the connection
@@ -2683,10 +2686,10 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 }
 }
 
-private boolean inspectIfAnyExceptionInChain(Throwable io, List<Class<? extends IOException>> ioList) {
+private boolean inspectIfAnyExceptionInChain(Throwable io, List<Class<? extends Exception>> ioList) {
 boolean exceptionToIgnore = false;
 for (Throwable t : Throwables.getCausalChain(io)) {
-for (Class<? extends IOException> exception : ioList) {
+for (Class<? extends Exception> exception : ioList) {
 exceptionToIgnore |= isExceptionInstanceOf(t, exception);
 }
 if (exceptionToIgnore) {
@@ -2697,7 +2700,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 return exceptionToIgnore;
 }
 
-private boolean isExc
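The refactor above replaces a hand-rolled loop with a single helper that walks the causal chain once. A minimal, self-contained sketch of the idiom (Guava's Throwables plus Hadoop's RemoteException; the class and method names here are illustrative, not Phoenix's private API):

    import java.util.List;
    import org.apache.hadoop.ipc.RemoteException;
    import com.google.common.base.Throwables;

    final class ExceptionChains {
        // True if any throwable in the causal chain is an instance of one of the
        // given classes, or is a RemoteException that wraps one by class name
        // (the spark/map-reduce case the original commit calls out).
        static boolean anyInChain(Throwable root, List<Class<? extends Exception>> classes) {
            for (Throwable t : Throwables.getCausalChain(root)) {
                for (Class<? extends Exception> clazz : classes) {
                    if (clazz.isInstance(t)
                            || (t instanceof RemoteException
                                    && ((RemoteException) t).getClassName().equals(clazz.getName()))) {
                        return true;
                    }
                }
            }
            return false;
        }
    }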

phoenix git commit: PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for SYSTEM.MUTEX table(addendum)

2018-10-02 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 27905e36e -> 4d5d68fea


PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for 
SYSTEM.MUTEX table(addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4d5d68fe
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4d5d68fe
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4d5d68fe

Branch: refs/heads/4.x-HBase-1.3
Commit: 4d5d68fea87bd00ebd6cfb76e1e6b71e257aba0a
Parents: 27905e3
Author: Ankit Singhal 
Authored: Tue Oct 2 12:28:00 2018 -0700
Committer: Ankit Singhal 
Committed: Tue Oct 2 12:28:00 2018 -0700

--
 .../phoenix/query/ConnectionQueryServicesImpl.java   | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4d5d68fe/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index d2ece24..dbfd461 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2568,12 +2568,15 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean foundAccessDeniedException = false;
 // when running spark/map reduce jobs the 
ADE might be wrapped
 // in a RemoteException
-if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(AccessDeniedException.class))) {
+if (inspectIfAnyExceptionInChain(e, 
Collections
+.<Class<? extends Exception>> 
singletonList(AccessDeniedException.class))) {
 // Pass
 logger.warn("Could not check for 
Phoenix SYSTEM tables, assuming they exist and are properly configured");
 
checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES,
 getProps()).getName());
 success = true;
-} else if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(NamespaceNotFoundException.class))) {
+} else if (inspectIfAnyExceptionInChain(e,
+Collections.<Class<? extends Exception>> singletonList(
+
NamespaceNotFoundException.class))) {
 // This exception is only possible if 
SYSTEM namespace mapping is enabled and SYSTEM namespace is missing
 // It implies that SYSTEM tables are 
not created and hence we shouldn't provide a connection
 AccessDeniedException ade = new 
AccessDeniedException("Insufficient permissions to create SYSTEM namespace and 
SYSTEM Tables");
@@ -2670,7 +2673,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 admin.createTable(tableDesc);
 }
 catch (IOException e) {
-if (inspectIfAnyExceptionInChain(e, Arrays.<Class<? extends IOException>> asList(
+if (inspectIfAnyExceptionInChain(e, Arrays.<Class<? extends Exception>> asList(
 AccessDeniedException.class, 
org.apache.hadoop.hbase.TableExistsException.class))) {
 // Ignore TableExistsException as another client might beat us 
during upgrade.
 // Ignore AccessDeniedException, as it may be an 
underprivileged user trying to use the connection
@@ -2683,10 +2686,10 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 }
 }
 
-private boolean inspectIfAnyExceptionInChain(Throwable io, List<Class<? extends IOException>> ioList) {
+private boolean inspectIfAnyExceptionInChain(Throwable io, List<Class<? extends Exception>> ioList) {
 boolean exceptionToIgnore = false;
 for (Throwable t : Throwables.getCausalChain(io)) {
-for (Class<? extends IOException> exception : ioList) {
+for (Class<? extends Exception> exception : ioList) {
 exceptionToIgnore |= isExceptionInstanceOf(t, exception);
 }
 if (exceptionToIgnore) {
@@ -2697,7 +2700,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 return exceptionToIgnore;
 }
 
-private boolean isExc
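What the addendum actually changes is call-site inference, not behavior: if the 4.x branches are compiled at a Java 7 source level (an assumption, but one the explicit type witnesses suggest), a nested generic call is not target-typed, so Collections.singletonList(X.class) infers List<Class<X>> and fails against a List<Class<? extends Exception>> parameter. A tiny sketch of the distinction, under that assumption:

    import java.io.IOException;
    import java.util.Collections;
    import java.util.List;

    final class WitnessDemo {
        static boolean check(List<Class<? extends Exception>> classes) {
            return !classes.isEmpty();
        }

        static void demo() {
            // Rejected by javac 7: infers List<Class<IOException>>, which does not
            // convert to List<Class<? extends Exception>> (generics are invariant).
            // check(Collections.singletonList(IOException.class));

            // Accepted everywhere: the explicit witness pins the element type.
            check(Collections.<Class<? extends Exception>> singletonList(IOException.class));
        }
    }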

phoenix git commit: PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for SYSTEM.MUTEX table

2018-10-02 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 caca3 -> d6277078f


PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for 
SYSTEM.MUTEX table


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d6277078
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d6277078
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d6277078

Branch: refs/heads/4.x-HBase-1.4
Commit: d6277078fe494ca3fc3bd5a2460859d59186e3cf
Parents: cac
Author: Ankit Singhal 
Authored: Tue Oct 2 12:10:35 2018 -0700
Committer: Ankit Singhal 
Committed: Tue Oct 2 12:10:35 2018 -0700

--
 .../query/ConnectionQueryServicesImpl.java  | 47 +---
 1 file changed, 31 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d6277078/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 736df6d..d2ece24 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -71,6 +71,7 @@ import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
 import java.sql.Types;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -2567,22 +2568,12 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean foundAccessDeniedException = false;
 // when running spark/map reduce jobs the 
ADE might be wrapped
 // in a RemoteException
-for (Throwable t : 
Throwables.getCausalChain(e)) {
-if (t instanceof AccessDeniedException
-|| (t instanceof 
RemoteException
-&& ((RemoteException) 
t).getClassName()
-
.equals(AccessDeniedException.class
-
.getName()))) {
-foundAccessDeniedException = true;
-break;
-}
-}
-if (foundAccessDeniedException) {
+if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(AccessDeniedException.class))) {
 // Pass
 logger.warn("Could not check for 
Phoenix SYSTEM tables, assuming they exist and are properly configured");
 
checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES,
 getProps()).getName());
 success = true;
-} else if 
(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
NamespaceNotFoundException.class))) {
+} else if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(NamespaceNotFoundException.class))) {
 // This exception is only possible if 
SYSTEM namespace mapping is enabled and SYSTEM namespace is missing
 // It implies that SYSTEM tables are 
not created and hence we shouldn't provide a connection
 AccessDeniedException ade = new 
AccessDeniedException("Insufficient permissions to create SYSTEM namespace and 
SYSTEM Tables");
@@ -2677,15 +2668,39 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 columnDesc.setTimeToLive(TTL_FOR_MUTEX); // Let mutex expire after 
some time
 tableDesc.addFamily(columnDesc);
 admin.createTable(tableDesc);
-} catch (IOException e) {
-
if(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
AccessDeniedException.class)) ||
-
!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
org.apache.hadoop.hbase.TableNotFoundException.class))) {
-// Ignore
+}
+cat
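The creation path these commits converge on: attempt createTable unconditionally and swallow only the failures expected in a healthy cluster, i.e. another client winning the race or an underprivileged caller. A condensed sketch against the HBase 1.x Admin API, reusing the anyInChain helper sketched earlier (the real Phoenix method is private and more involved):

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableExistsException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.security.AccessDeniedException;

    final class MutexBootstrap {
        static void createMutexTable(Admin admin, HTableDescriptor tableDesc) throws IOException {
            try {
                admin.createTable(tableDesc);
            } catch (IOException e) {
                // TableExistsException: another client beat us to it during upgrade.
                // AccessDeniedException: an underprivileged user; the table likely exists.
                if (!ExceptionChains.anyInChain(e, Arrays.<Class<? extends Exception>> asList(
                        AccessDeniedException.class, TableExistsException.class))) {
                    throw e;
                }
            }
        }
    }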

phoenix git commit: PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for SYSTEM.MUTEX table

2018-10-02 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 5255b4aa6 -> 5635204c6


PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for 
SYSTEM.MUTEX table


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5635204c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5635204c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5635204c

Branch: refs/heads/4.x-HBase-1.2
Commit: 5635204c6302425fc424537a4d713f33ba124e52
Parents: 5255b4a
Author: Ankit Singhal 
Authored: Tue Oct 2 12:12:07 2018 -0700
Committer: Ankit Singhal 
Committed: Tue Oct 2 12:12:07 2018 -0700

--
 .../query/ConnectionQueryServicesImpl.java  | 47 +---
 1 file changed, 31 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5635204c/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 736df6d..d2ece24 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -71,6 +71,7 @@ import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
 import java.sql.Types;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -2567,22 +2568,12 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean foundAccessDeniedException = false;
 // when running spark/map reduce jobs the 
ADE might be wrapped
 // in a RemoteException
-for (Throwable t : 
Throwables.getCausalChain(e)) {
-if (t instanceof AccessDeniedException
-|| (t instanceof 
RemoteException
-&& ((RemoteException) 
t).getClassName()
-
.equals(AccessDeniedException.class
-
.getName()))) {
-foundAccessDeniedException = true;
-break;
-}
-}
-if (foundAccessDeniedException) {
+if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(AccessDeniedException.class))) {
 // Pass
 logger.warn("Could not check for 
Phoenix SYSTEM tables, assuming they exist and are properly configured");
 
checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES,
 getProps()).getName());
 success = true;
-} else if 
(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
NamespaceNotFoundException.class))) {
+} else if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(NamespaceNotFoundException.class))) {
 // This exception is only possible if 
SYSTEM namespace mapping is enabled and SYSTEM namespace is missing
 // It implies that SYSTEM tables are 
not created and hence we shouldn't provide a connection
 AccessDeniedException ade = new 
AccessDeniedException("Insufficient permissions to create SYSTEM namespace and 
SYSTEM Tables");
@@ -2677,15 +2668,39 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 columnDesc.setTimeToLive(TTL_FOR_MUTEX); // Let mutex expire after 
some time
 tableDesc.addFamily(columnDesc);
 admin.createTable(tableDesc);
-} catch (IOException e) {
-
if(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
AccessDeniedException.class)) ||
-
!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
org.apache.hadoop.hbase.TableNotFoundException.class))) {
-// Ignore
+}
+cat

phoenix git commit: PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for SYSTEM.MUTEX table

2018-10-02 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 df01773ca -> 27905e36e


PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for 
SYSTEM.MUTEX table


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/27905e36
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/27905e36
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/27905e36

Branch: refs/heads/4.x-HBase-1.3
Commit: 27905e36e2126ed8188fcad06f0a48b4e1f93616
Parents: df01773
Author: Ankit Singhal 
Authored: Tue Oct 2 12:11:38 2018 -0700
Committer: Ankit Singhal 
Committed: Tue Oct 2 12:11:38 2018 -0700

--
 .../query/ConnectionQueryServicesImpl.java  | 47 +---
 1 file changed, 31 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/27905e36/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 736df6d..d2ece24 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -71,6 +71,7 @@ import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
 import java.sql.Types;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -2567,22 +2568,12 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean foundAccessDeniedException = false;
 // when running spark/map reduce jobs the 
ADE might be wrapped
 // in a RemoteException
-for (Throwable t : 
Throwables.getCausalChain(e)) {
-if (t instanceof AccessDeniedException
-|| (t instanceof 
RemoteException
-&& ((RemoteException) 
t).getClassName()
-
.equals(AccessDeniedException.class
-
.getName()))) {
-foundAccessDeniedException = true;
-break;
-}
-}
-if (foundAccessDeniedException) {
+if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(AccessDeniedException.class))) {
 // Pass
 logger.warn("Could not check for 
Phoenix SYSTEM tables, assuming they exist and are properly configured");
 
checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES,
 getProps()).getName());
 success = true;
-} else if 
(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
NamespaceNotFoundException.class))) {
+} else if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(NamespaceNotFoundException.class))) {
 // This exception is only possible if 
SYSTEM namespace mapping is enabled and SYSTEM namespace is missing
 // It implies that SYSTEM tables are 
not created and hence we shouldn't provide a connection
 AccessDeniedException ade = new 
AccessDeniedException("Insufficient permissions to create SYSTEM namespace and 
SYSTEM Tables");
@@ -2677,15 +2668,39 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 columnDesc.setTimeToLive(TTL_FOR_MUTEX); // Let mutex expire after 
some time
 tableDesc.addFamily(columnDesc);
 admin.createTable(tableDesc);
-} catch (IOException e) {
-
if(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
AccessDeniedException.class)) ||
-
!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
org.apache.hadoop.hbase.TableNotFoundException.class))) {
-// Ignore
+}
+cat

phoenix git commit: PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for SYSTEM.MUTEX table

2018-10-02 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/master 5016a52e7 -> 61250b5ca


PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for 
SYSTEM.MUTEX table


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/61250b5c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/61250b5c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/61250b5c

Branch: refs/heads/master
Commit: 61250b5ca0abdaf0ed2ddb74bc576e41b6219b10
Parents: 5016a52
Author: Ankit Singhal 
Authored: Tue Oct 2 12:09:28 2018 -0700
Committer: Ankit Singhal 
Committed: Tue Oct 2 12:09:28 2018 -0700

--
 .../query/ConnectionQueryServicesImpl.java  | 47 +---
 1 file changed, 31 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/61250b5c/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 6e27f2a..daecf1f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -72,6 +72,7 @@ import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
 import java.sql.Types;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -2600,22 +2601,12 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean foundAccessDeniedException = false;
 // when running spark/map reduce jobs the 
ADE might be wrapped
 // in a RemoteException
-for (Throwable t : 
Throwables.getCausalChain(e)) {
-if (t instanceof AccessDeniedException
-|| (t instanceof 
RemoteException
-&& ((RemoteException) 
t).getClassName()
-
.equals(AccessDeniedException.class
-
.getName()))) {
-foundAccessDeniedException = true;
-break;
-}
-}
-if (foundAccessDeniedException) {
+if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(AccessDeniedException.class))) {
 // Pass
 logger.warn("Could not check for 
Phoenix SYSTEM tables, assuming they exist and are properly configured");
 
checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES,
 getProps()).getName());
 success = true;
-} else if 
(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
NamespaceNotFoundException.class))) {
+} else if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(NamespaceNotFoundException.class))) {
 // This exception is only possible if 
SYSTEM namespace mapping is enabled and SYSTEM namespace is missing
 // It implies that SYSTEM tables are 
not created and hence we shouldn't provide a connection
 AccessDeniedException ade = new 
AccessDeniedException("Insufficient permissions to create SYSTEM namespace and 
SYSTEM Tables");
@@ -2712,15 +2703,39 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 .setTimeToLive(TTL_FOR_MUTEX).build())
 .build();
 admin.createTable(tableDesc);
-} catch (IOException e) {
-
if(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
AccessDeniedException.class)) ||
-
!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
org.apache.hadoop.hbase.TableNotFoundException.class))) {
-// Ignore
+}
+catch (IOException e) {
+if (inspectIfAn

svn commit: r1841484 - in /phoenix/site: publish/phoenix_spark.html source/src/site/markdown/phoenix_spark.md

2018-09-20 Thread ankit
Author: ankit
Date: Thu Sep 20 17:52:31 2018
New Revision: 1841484

URL: http://svn.apache.org/viewvc?rev=1841484&view=rev
Log:
update save api when using spark dataframe(Sandeep Nemuri)

Modified:
phoenix/site/publish/phoenix_spark.html
phoenix/site/source/src/site/markdown/phoenix_spark.md

Modified: phoenix/site/publish/phoenix_spark.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/phoenix_spark.html?rev=1841484&r1=1841483&r2=1841484&view=diff
==
--- phoenix/site/publish/phoenix_spark.html (original)
+++ phoenix/site/publish/phoenix_spark.html Thu Sep 20 17:52:31 2018
@@ -1,7 +1,7 @@
 
 
 
 
@@ -324,8 +324,16 @@ val df = sqlContext.load("org.apach
   "zkUrl" -> hbaseConnectionString))
 
 // Save to OUTPUT_TABLE
-df.save("org.apache.phoenix.spark", SaveMode.Overwrite, 
Map("table" -> "OUTPUT_TABLE",
-  "zkUrl" -> hbaseConnectionString))
+df.saveToPhoenix(Map("table" -> "OUTPUT_TABLE", 
"zkUrl" -> hbaseConnectionString))
+
+or
+
+df.write \
+ .format("org.apache.phoenix.spark") \
+ .mode("overwrite") \
+ .option("table", "OUTPUT_TABLE") \
+ .option("zkUrl", "localhost:2181") \
+ .save()
  
 


Modified: phoenix/site/source/src/site/markdown/phoenix_spark.md
URL: 
http://svn.apache.org/viewvc/phoenix/site/source/src/site/markdown/phoenix_spark.md?rev=1841484&r1=1841483&r2=1841484&view=diff
==
--- phoenix/site/source/src/site/markdown/phoenix_spark.md (original)
+++ phoenix/site/source/src/site/markdown/phoenix_spark.md Thu Sep 20 17:52:31 
2018
@@ -169,8 +169,16 @@ val df = sqlContext.load("org.apache.pho
   "zkUrl" -> hbaseConnectionString))
 
 // Save to OUTPUT_TABLE
-df.save("org.apache.phoenix.spark", SaveMode.Overwrite, Map("table" -> 
"OUTPUT_TABLE",
-  "zkUrl" -> hbaseConnectionString))
+df.saveToPhoenix(Map("table" -> "OUTPUT_TABLE", "zkUrl" -> 
hbaseConnectionString))
+
+or
+
+df.write \
+ .format("org.apache.phoenix.spark") \
+ .mode("overwrite") \
+ .option("table", "OUTPUT_TABLE") \
+ .option("zkUrl", "localhost:2181") \
+ .save()
 ```
 
 ### PySpark




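For the save API documented above, the Java flavor of the DataFrameWriter chain is the direct analogue of the Scala snippet (a sketch assuming Spark 2.x's Dataset API and the phoenix-spark connector on the classpath; the table name and ZooKeeper quorum are placeholders):

    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.SaveMode;

    final class PhoenixSparkSave {
        // Writes a DataFrame to a Phoenix table through the connector's data source.
        static void save(Dataset<Row> df) {
            df.write()
              .format("org.apache.phoenix.spark")
              .mode(SaveMode.Overwrite)
              .option("table", "OUTPUT_TABLE")
              .option("zkUrl", "localhost:2181")
              .save();
        }
    }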
phoenix git commit: PHOENIX-4839 IndexHalfStoreFileReaderGenerator throws NullPointerException(Aman Poonia)

2018-08-21 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 b03f483b6 -> 0abfd9830


PHOENIX-4839 IndexHalfStoreFileReaderGenerator throws NullPointerException(Aman 
Poonia)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0abfd983
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0abfd983
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0abfd983

Branch: refs/heads/4.x-HBase-1.2
Commit: 0abfd9830e0a91d410173c2c3fd06fb6c52ee62e
Parents: b03f483
Author: Ankit Singhal 
Authored: Tue Aug 21 11:54:01 2018 -0700
Committer: Ankit Singhal 
Committed: Tue Aug 21 11:54:01 2018 -0700

--
 .../regionserver/IndexHalfStoreFileReader.java  |   6 +
 .../IndexHalfStoreFileReaderGenerator.java  | 138 ++-
 2 files changed, 18 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0abfd983/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index d1d12fb..8bd0d72 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -123,4 +123,10 @@ public class IndexHalfStoreFileReader extends 
StoreFile.Reader {
 public boolean isTop() {
 return top;
 }
+
+@Override
+public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean 
pread, boolean isCompaction, long readPt) {
+return new LocalIndexStoreFileScanner(this, getScanner(cacheBlocks, 
pread, isCompaction), true,
+getHFileReader().hasMVCCInfo(), readPt);
+}
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0abfd983/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index e41086b..ab65456 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -17,16 +17,11 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import static 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.SCAN_START_ROW_SUFFIX;
-
 import java.io.IOException;
 import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.NavigableSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -71,7 +66,7 @@ import org.apache.phoenix.util.RepairUtil;
 import com.google.common.collect.Lists;
 
 public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
-
+
 private static final String LOCAL_INDEX_AUTOMATIC_REPAIR = 
"local.index.automatic.repair";
 public static final Log LOG = 
LogFactory.getLog(IndexHalfStoreFileReaderGenerator.class);
 
@@ -153,7 +148,9 @@ public class IndexHalfStoreFileReaderGenerator extends 
BaseRegionObserver {
 try {
 conn = 
QueryUtil.getConnectionOnServer(ctx.getEnvironment().getConfiguration()).unwrap(
 PhoenixConnection.class);
-PTable dataTable = IndexUtil.getPDataTable(conn, 
ctx.getEnvironment().getRegion().getTableDesc());
+PTable dataTable =
+IndexUtil.getPDataTable(conn, 
ctx.getEnvironment().getRegion()
+.getTableDesc());
 List<PTable> indexes = dataTable.getIndexes();
 Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers =
 new HashMap<ImmutableBytesWritable, IndexMaintainer>();
@@ -187,19 +184,12 @@ public class IndexHalfStoreFileReaderGenerator extends 
BaseRegionObserver {
 return reader;
 }
 
-@SuppressWarnings("deprecation")
 @Override
-public InternalScanner 
preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
-Store store, List<? extends KeyValueScanner> scanners, ScanType 
scanType,
-long earliestPutTs, InternalScanner s, CompactionRequest request) 
throws IOException {
+public InternalScanner preCompact(
+ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+InternalScann
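The substance of the fix is the hook migration visible in this hunk: the removed preCompactScannerOpen built its own scanner over the store files (and could end up with nothing to return, the NullPointerException this issue tracks), while the replacement wraps the InternalScanner HBase has already constructed. A skeletal sketch of the new shape against the HBase 1.x coprocessor API, with the local-index repair logic elided:

    import java.io.IOException;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.regionserver.InternalScanner;
    import org.apache.hadoop.hbase.regionserver.ScanType;
    import org.apache.hadoop.hbase.regionserver.Store;
    import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

    public class LocalIndexCompactionObserver extends BaseRegionObserver {
        @Override
        public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
                Store store, InternalScanner scanner, ScanType scanType,
                CompactionRequest request) throws IOException {
            // Pass non-local-index stores through untouched; never return null,
            // since HBase uses the returned scanner directly for the compaction.
            return scanner;
        }
    }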

phoenix git commit: PHOENIX-4839 IndexHalfStoreFileReaderGenerator throws NullPointerException(Aman Poonia)

2018-08-21 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 f60d11ee5 -> 3b9a108f3


PHOENIX-4839 IndexHalfStoreFileReaderGenerator throws NullPointerException(Aman 
Poonia)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3b9a108f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3b9a108f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3b9a108f

Branch: refs/heads/4.x-HBase-1.4
Commit: 3b9a108f3891601cdfdc88dbad8ff39534cfd8f1
Parents: f60d11e
Author: Ankit Singhal 
Authored: Tue Aug 21 11:52:29 2018 -0700
Committer: Ankit Singhal 
Committed: Tue Aug 21 11:52:29 2018 -0700

--
 .../regionserver/IndexHalfStoreFileReader.java  |   7 +
 .../IndexHalfStoreFileReaderGenerator.java  | 133 ++-
 2 files changed, 17 insertions(+), 123 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3b9a108f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index d1d12fb..1f3113c 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -123,4 +123,11 @@ public class IndexHalfStoreFileReader extends 
StoreFile.Reader {
 public boolean isTop() {
 return top;
 }
+
+@Override
+public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean 
pread, boolean isCompaction, long readPt,
+long scannerOrder, boolean canOptimizeForNonNullColumn) {
+return new LocalIndexStoreFileScanner(this, getScanner(cacheBlocks, 
pread, isCompaction), true,
+getHFileReader().hasMVCCInfo(), readPt, scannerOrder, 
canOptimizeForNonNullColumn);
+}
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3b9a108f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 74243e1..bf83147 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -17,16 +17,11 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import static 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.SCAN_START_ROW_SUFFIX;
-
 import java.io.IOException;
 import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.NavigableSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -153,7 +148,9 @@ public class IndexHalfStoreFileReaderGenerator extends 
BaseRegionObserver {
 try {
 conn = 
QueryUtil.getConnectionOnServer(ctx.getEnvironment().getConfiguration()).unwrap(
 PhoenixConnection.class);
-PTable dataTable = IndexUtil.getPDataTable(conn, 
ctx.getEnvironment().getRegion().getTableDesc());
+PTable dataTable =
+IndexUtil.getPDataTable(conn, 
ctx.getEnvironment().getRegion()
+.getTableDesc());
 List<PTable> indexes = dataTable.getIndexes();
 Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers =
 new HashMap<ImmutableBytesWritable, IndexMaintainer>();
@@ -187,19 +184,13 @@ public class IndexHalfStoreFileReaderGenerator extends 
BaseRegionObserver {
 return reader;
 }
 
-@SuppressWarnings("deprecation")
 @Override
-public InternalScanner 
preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
-Store store, List<? extends KeyValueScanner> scanners, ScanType 
scanType,
-long earliestPutTs, InternalScanner s, CompactionRequest request) 
throws IOException {
+public InternalScanner preCompactScannerOpen(
+
org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment>
 c, Store store,
+java.util.List<? extends KeyValueScanner> scanners, ScanType 
scanType, long earliestPutTs,
+InternalScanner s, CompactionRequest request) throws IOException {
+
 if (!IndexUtil.isLocalIndexStore(store)) { return s; }
-Scan scan = null;
-  

phoenix git commit: PHOENIX-4839 IndexHalfStoreFileReaderGenerator throws NullPointerException(Aman Poonia)

2018-08-21 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 0639a7426 -> a575ac04e


PHOENIX-4839 IndexHalfStoreFileReaderGenerator throws NullPointerException(Aman 
Poonia)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a575ac04
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a575ac04
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a575ac04

Branch: refs/heads/4.x-HBase-1.3
Commit: a575ac04ea4b4497209a194169d2121e210ec307
Parents: 0639a74
Author: Ankit Singhal 
Authored: Tue Aug 21 11:51:50 2018 -0700
Committer: Ankit Singhal 
Committed: Tue Aug 21 11:51:50 2018 -0700

--
 .../regionserver/IndexHalfStoreFileReader.java  |   6 +
 .../IndexHalfStoreFileReaderGenerator.java  | 138 ++-
 2 files changed, 18 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a575ac04/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index d1d12fb..8bd0d72 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -123,4 +123,10 @@ public class IndexHalfStoreFileReader extends 
StoreFile.Reader {
 public boolean isTop() {
 return top;
 }
+
+@Override
+public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean 
pread, boolean isCompaction, long readPt) {
+return new LocalIndexStoreFileScanner(this, getScanner(cacheBlocks, 
pread, isCompaction), true,
+getHFileReader().hasMVCCInfo(), readPt);
+}
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a575ac04/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index e41086b..ab65456 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -17,16 +17,11 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import static 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.SCAN_START_ROW_SUFFIX;
-
 import java.io.IOException;
 import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.NavigableSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -71,7 +66,7 @@ import org.apache.phoenix.util.RepairUtil;
 import com.google.common.collect.Lists;
 
 public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
-
+
 private static final String LOCAL_INDEX_AUTOMATIC_REPAIR = 
"local.index.automatic.repair";
 public static final Log LOG = 
LogFactory.getLog(IndexHalfStoreFileReaderGenerator.class);
 
@@ -153,7 +148,9 @@ public class IndexHalfStoreFileReaderGenerator extends 
BaseRegionObserver {
 try {
 conn = 
QueryUtil.getConnectionOnServer(ctx.getEnvironment().getConfiguration()).unwrap(
 PhoenixConnection.class);
-PTable dataTable = IndexUtil.getPDataTable(conn, 
ctx.getEnvironment().getRegion().getTableDesc());
+PTable dataTable =
+IndexUtil.getPDataTable(conn, 
ctx.getEnvironment().getRegion()
+.getTableDesc());
 List<PTable> indexes = dataTable.getIndexes();
 Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers =
 new HashMap<ImmutableBytesWritable, IndexMaintainer>();
@@ -187,19 +184,12 @@ public class IndexHalfStoreFileReaderGenerator extends 
BaseRegionObserver {
 return reader;
 }
 
-@SuppressWarnings("deprecation")
 @Override
-public InternalScanner 
preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
-Store store, List<? extends KeyValueScanner> scanners, ScanType 
scanType,
-long earliestPutTs, InternalScanner s, CompactionRequest request) 
throws IOException {
+public InternalScanner preCompact(
+ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+InternalScann

phoenix git commit: PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64

2018-07-30 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 85b479b0e -> 8e2e99d96


PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8e2e99d9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8e2e99d9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8e2e99d9

Branch: refs/heads/4.x-HBase-1.2
Commit: 8e2e99d965e532a007e021a6dd67b26fb2097d17
Parents: 85b479b0e
Author: Ankit Singhal 
Authored: Mon Jul 30 13:57:18 2018 -0700
Committer: Ankit Singhal 
Committed: Mon Jul 30 13:57:18 2018 -0700

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java |  7 +++--
 .../phoenix/mapreduce/CsvBulkImportUtil.java|  8 --
 .../util/PhoenixConfigurationUtil.java  |  7 +++--
 .../apache/phoenix/schema/types/PVarbinary.java |  4 +--
 .../phoenix/util/csv/CsvUpsertExecutor.java |  4 +--
 .../phoenix/util/json/JsonUpsertExecutor.java   |  4 +--
 .../util/AbstractUpsertExecutorTest.java| 12 
 .../util/TenantIdByteConversionTest.java| 30 
 8 files changed, 50 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8e2e99d9/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 04272fa..528fe7f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -31,12 +31,13 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
+import java.util.Base64;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
@@ -278,7 +279,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 values[i] = rs.getObject(i + 1);
 }
 conn = getTenantSpecificConnection(tenantId);
-
pkIds.add(Base64.encodeBytes(PhoenixRuntime.encodeColumnValues(conn, 
tableOrViewName.toUpperCase(), values, columns)));
+
pkIds.add(Bytes.toString(Base64.getEncoder().encode(PhoenixRuntime.encodeColumnValues(conn,
 tableOrViewName.toUpperCase(), values, columns))));
 }
 return pkIds.toArray(new String[pkIds.size()]);
 }
@@ -296,7 +297,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 PreparedStatement stmt = conn.prepareStatement(query);
 int bindCounter = 1;
 for (int i = 0; i < cursorIds.length; i++) {
-Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.decode(cursorIds[i]), columns);
+Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.getDecoder().decode(cursorIds[i]), columns);
 for (int j = 0; j < pkParts.length; j++) {
 stmt.setObject(bindCounter++, pkParts[j]);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8e2e99d9/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
index ff9ff72..bf5a538 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
@@ -17,9 +17,11 @@
  */
 package org.apache.phoenix.mapreduce;
 
+import java.util.Base64;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
@@ -68,7 +70,7 @@ public class CsvBulkImportUtil {
 
 @VisibleForTesting
 static void setChar(Configuration conf, String confKey, char charValue) {
-conf.set(confKey, 
Base64.encodeBytes(Character.toString(charValue).getBytes()));
+conf.set(confKey, 
Bytes.toString(Base64.getEncoder().encode(Character.toString(charValue)
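The mechanical rule applied throughout this change: Base64.encodeBytes(bytes) becomes Bytes.toString(Base64.getEncoder().encode(bytes)) and Base64.decode(str) becomes Base64.getDecoder().decode(str). A self-contained, JDK-only round trip (StandardCharsets stands in for HBase's Bytes helper):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    final class Base64RoundTrip {
        public static void main(String[] args) {
            byte[] raw = ",".getBytes(StandardCharsets.UTF_8); // e.g. a CSV delimiter
            // Replaces org.apache.hadoop.hbase.util.Base64.encodeBytes(raw):
            String encoded = new String(Base64.getEncoder().encode(raw), StandardCharsets.UTF_8);
            // Replaces Base64.decode(encoded):
            byte[] decoded = Base64.getDecoder().decode(encoded);
            assert ",".equals(new String(decoded, StandardCharsets.UTF_8));
        }
    }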

phoenix git commit: PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64

2018-07-30 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 3eecbe985 -> 22934e5af


PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/22934e5a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/22934e5a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/22934e5a

Branch: refs/heads/4.x-HBase-1.4
Commit: 22934e5af7af79580bf54feeb7667eccafaafc71
Parents: 3eecbe9
Author: Ankit Singhal 
Authored: Mon Jul 30 13:57:40 2018 -0700
Committer: Ankit Singhal 
Committed: Mon Jul 30 13:57:40 2018 -0700

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java |  7 +++--
 .../phoenix/mapreduce/CsvBulkImportUtil.java|  8 --
 .../util/PhoenixConfigurationUtil.java  |  7 +++--
 .../apache/phoenix/schema/types/PVarbinary.java |  4 +--
 .../phoenix/util/csv/CsvUpsertExecutor.java |  4 +--
 .../phoenix/util/json/JsonUpsertExecutor.java   |  4 +--
 .../util/AbstractUpsertExecutorTest.java| 12 
 .../util/TenantIdByteConversionTest.java| 30 
 8 files changed, 50 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/22934e5a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 04272fa..528fe7f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -31,12 +31,13 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
+import java.util.Base64;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
@@ -278,7 +279,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 values[i] = rs.getObject(i + 1);
 }
 conn = getTenantSpecificConnection(tenantId);
-
pkIds.add(Base64.encodeBytes(PhoenixRuntime.encodeColumnValues(conn, 
tableOrViewName.toUpperCase(), values, columns)));
+
pkIds.add(Bytes.toString(Base64.getEncoder().encode(PhoenixRuntime.encodeColumnValues(conn,
 tableOrViewName.toUpperCase(), values, columns))));
 }
 return pkIds.toArray(new String[pkIds.size()]);
 }
@@ -296,7 +297,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 PreparedStatement stmt = conn.prepareStatement(query);
 int bindCounter = 1;
 for (int i = 0; i < cursorIds.length; i++) {
-Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.decode(cursorIds[i]), columns);
+Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.getDecoder().decode(cursorIds[i]), columns);
 for (int j = 0; j < pkParts.length; j++) {
 stmt.setObject(bindCounter++, pkParts[j]);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/22934e5a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
index ff9ff72..bf5a538 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
@@ -17,9 +17,11 @@
  */
 package org.apache.phoenix.mapreduce;
 
+import java.util.Base64;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
@@ -68,7 +70,7 @@ public class CsvBulkImportUtil {
 
 @VisibleForTesting
 static void setChar(Configuration conf, String confKey, char charValue) {
-conf.set(confKey, 
Base64.encodeBytes(Character.toString(charValue).getBytes()));
+conf.set(confKey, 
Bytes.toString(Base64.getEncoder().encode(Character.toString(charValue)

phoenix git commit: PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64

2018-07-30 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 bf6db8f4d -> 6f5926b6b


PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6f5926b6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6f5926b6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6f5926b6

Branch: refs/heads/4.x-HBase-1.3
Commit: 6f5926b6b1f3d89b7283a5d030d6f46533dc0d39
Parents: bf6db8f
Author: Ankit Singhal 
Authored: Mon Jul 30 13:56:47 2018 -0700
Committer: Ankit Singhal 
Committed: Mon Jul 30 13:56:47 2018 -0700

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java |  7 +++--
 .../phoenix/mapreduce/CsvBulkImportUtil.java|  8 --
 .../util/PhoenixConfigurationUtil.java  |  7 +++--
 .../apache/phoenix/schema/types/PVarbinary.java |  4 +--
 .../phoenix/util/csv/CsvUpsertExecutor.java |  4 +--
 .../phoenix/util/json/JsonUpsertExecutor.java   |  4 +--
 .../util/AbstractUpsertExecutorTest.java| 12 
 .../util/TenantIdByteConversionTest.java| 30 
 8 files changed, 50 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6f5926b6/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 04272fa..528fe7f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -31,12 +31,13 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
+import java.util.Base64;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
@@ -278,7 +279,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 values[i] = rs.getObject(i + 1);
 }
 conn = getTenantSpecificConnection(tenantId);
-
pkIds.add(Base64.encodeBytes(PhoenixRuntime.encodeColumnValues(conn, 
tableOrViewName.toUpperCase(), values, columns)));
+
pkIds.add(Bytes.toString(Base64.getEncoder().encode(PhoenixRuntime.encodeColumnValues(conn,
 tableOrViewName.toUpperCase(), values, columns))));
 }
 return pkIds.toArray(new String[pkIds.size()]);
 }
@@ -296,7 +297,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 PreparedStatement stmt = conn.prepareStatement(query);
 int bindCounter = 1;
 for (int i = 0; i < cursorIds.length; i++) {
-Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.decode(cursorIds[i]), columns);
+Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.getDecoder().decode(cursorIds[i]), columns);
 for (int j = 0; j < pkParts.length; j++) {
 stmt.setObject(bindCounter++, pkParts[j]);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6f5926b6/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
index ff9ff72..bf5a538 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
@@ -17,9 +17,11 @@
  */
 package org.apache.phoenix.mapreduce;
 
+import java.util.Base64;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
@@ -68,7 +70,7 @@ public class CsvBulkImportUtil {
 
 @VisibleForTesting
 static void setChar(Configuration conf, String confKey, char charValue) {
-conf.set(confKey, 
Base64.encodeBytes(Character.toString(charValue).getBytes()));
+conf.set(confKey, 
Bytes.toString(Base64.getEncoder().encode(Character.toString(charValue)

phoenix git commit: PHOENIX-4826 Changes to support HBase 2.0.1

2018-07-30 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/master e26e0f29b -> a4f93eb45


PHOENIX-4826 Changes to support HBase 2.0.1


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a4f93eb4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a4f93eb4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a4f93eb4

Branch: refs/heads/master
Commit: a4f93eb458c516206cc3ed25978fb025d752a2a7
Parents: e26e0f2
Author: Ankit Singhal 
Authored: Mon Jul 30 13:52:21 2018 -0700
Committer: Ankit Singhal 
Committed: Mon Jul 30 13:52:21 2018 -0700

--
 .../index/covered/data/DelegateComparator.java  | 83 
 .../hbase/index/covered/data/IndexMemStore.java |  6 +-
 .../index/covered/data/TestIndexMemStore.java   |  6 +-
 pom.xml |  2 +-
 4 files changed, 90 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a4f93eb4/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/DelegateComparator.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/DelegateComparator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/DelegateComparator.java
new file mode 100644
index 000..478d98b
--- /dev/null
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/DelegateComparator.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hbase.index.covered.data;
+
+import java.util.Comparator;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+
+public class DelegateComparator implements CellComparator {
+
+private CellComparator delegate;
+
+public DelegateComparator(CellComparator delegate) {
+this.delegate=delegate;
+}
+
+@Override
+public int compare(Cell leftCell, Cell rightCell) {
+return delegate.compare(leftCell, rightCell);
+}
+
+@Override
+public int compareRows(Cell leftCell, Cell rightCell) {
+return delegate.compareRows(leftCell, rightCell);
+}
+
+@Override
+public int compareRows(Cell cell, byte[] bytes, int offset, int length) {
+return delegate.compareRows(cell, bytes, offset, length);
+}
+
+@Override
+public int compareWithoutRow(Cell leftCell, Cell rightCell) {
+return delegate.compareWithoutRow(leftCell, rightCell);
+}
+
+@Override
+public int compareFamilies(Cell leftCell, Cell rightCell) {
+return delegate.compareFamilies(leftCell, rightCell);
+}
+
+@Override
+public int compareQualifiers(Cell leftCell, Cell rightCell) {
+return delegate.compareQualifiers(leftCell, rightCell);
+}
+
+@Override
+public int compareTimestamps(Cell leftCell, Cell rightCell) {
+return delegate.compareTimestamps(leftCell, rightCell);
+}
+
+@Override
+public int compareTimestamps(long leftCellts, long rightCellts) {
+return delegate.compareTimestamps(leftCellts, rightCellts);
+}
+
+@Override
+public int compare(Cell leftCell, Cell rightCell, boolean 
ignoreSequenceid) {
+return delegate.compare(leftCell, rightCell, ignoreSequenceid);
+}
+
+@Override
+public Comparator<Cell> getSimpleComparator() {
+return delegate.getSimpleComparator();
+}
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a4f93eb4/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
index 8247496..301d825 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
+++ 
b/phoenix
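DelegateComparator exists because HBase 2.0.1 widened the CellComparator interface that IndexMemStore's comparator must implement, while the index store still wants to defer every decision to an underlying comparator. A sketch of wiring such a wrapper (assumes HBase 2.x, where CellComparatorImpl.COMPARATOR is the stock instance):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellComparator;
    import org.apache.hadoop.hbase.CellComparatorImpl;
    import org.apache.phoenix.hbase.index.covered.data.DelegateComparator;

    final class ComparatorWiring {
        static CellComparator indexComparator() {
            // Delegate everything; override compare only if the index store
            // ever needs a different ordering for a subset of cells.
            return new DelegateComparator(CellComparatorImpl.COMPARATOR) {
                @Override
                public int compare(Cell left, Cell right) {
                    return super.compare(left, right);
                }
            };
        }
    }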

phoenix git commit: PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64

2018-07-30 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/master e65917eb2 -> e26e0f29b


PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e26e0f29
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e26e0f29
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e26e0f29

Branch: refs/heads/master
Commit: e26e0f29b91dceaf3ca0a9fb76944803e707fbbc
Parents: e65917e
Author: Ankit Singhal 
Authored: Mon Jul 30 13:51:43 2018 -0700
Committer: Ankit Singhal 
Committed: Mon Jul 30 13:51:43 2018 -0700

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java |  7 +++--
 .../phoenix/mapreduce/CsvBulkImportUtil.java|  8 --
 .../util/PhoenixConfigurationUtil.java  |  7 +++--
 .../apache/phoenix/schema/types/PVarbinary.java |  4 +--
 .../phoenix/util/csv/CsvUpsertExecutor.java |  4 +--
 .../phoenix/util/json/JsonUpsertExecutor.java   |  4 +--
 .../util/AbstractUpsertExecutorTest.java| 12 
 .../util/TenantIdByteConversionTest.java| 30 
 8 files changed, 50 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e26e0f29/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 04272fa..528fe7f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -31,12 +31,13 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
+import java.util.Base64;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
@@ -278,7 +279,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 values[i] = rs.getObject(i + 1);
 }
 conn = getTenantSpecificConnection(tenantId);
-
pkIds.add(Base64.encodeBytes(PhoenixRuntime.encodeColumnValues(conn, 
tableOrViewName.toUpperCase(), values, columns)));
+
pkIds.add(Bytes.toString(Base64.getEncoder().encode(PhoenixRuntime.encodeColumnValues(conn,
 tableOrViewName.toUpperCase(), values, columns))));
 }
 return pkIds.toArray(new String[pkIds.size()]);
 }
@@ -296,7 +297,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 PreparedStatement stmt = conn.prepareStatement(query);
 int bindCounter = 1;
 for (int i = 0; i < cursorIds.length; i++) {
-Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.decode(cursorIds[i]), columns);
+Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.getDecoder().decode(cursorIds[i]), columns);
 for (int j = 0; j < pkParts.length; j++) {
 stmt.setObject(bindCounter++, pkParts[j]);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e26e0f29/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
index ff9ff72..bf5a538 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
@@ -17,9 +17,11 @@
  */
 package org.apache.phoenix.mapreduce;
 
+import java.util.Base64;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
@@ -68,7 +70,7 @@ public class CsvBulkImportUtil {
 
 @VisibleForTesting
 static void setChar(Configuration conf, String confKey, char charValue) {
-conf.set(confKey, 
Base64.encodeBytes(Character.toString(charValue).getBytes()));
+conf.set(confKey, 
Bytes.toString(Base64.getEncoder().encode(Character.toString(charValue)
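
The replacement pattern used throughout this patch is mechanical: java.util.Base64 (JDK 8+) takes over from the removed HBase helper, with a Bytes.toString round-trip wherever the old API handed back a String directly. A self-contained sketch, with illustrative names only:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class Base64MigrationDemo {
    public static void main(String[] args) {
        byte[] raw = "phoenix".getBytes(StandardCharsets.UTF_8);

        // Old style: the HBase helper's Base64.encodeBytes(raw) returned a String.
        // New style: the JDK encoder, either to bytes or straight to a String.
        String encoded = Base64.getEncoder().encodeToString(raw);

        // Old style: Base64.decode(encoded).
        byte[] decoded = Base64.getDecoder().decode(encoded);

        System.out.println(encoded + " -> " + new String(decoded, StandardCharsets.UTF_8));
    }
}

Since Base64 output is pure ASCII, encodeToString(raw) is equivalent to wrapping Base64.getEncoder().encode(raw) in a bytes-to-String conversion, just without the extra copy.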

phoenix git commit: PHOENIX-4399 Remove explicit abort on RegionServerServices (addendum)

2018-06-22 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 b78f45c03 -> cb962f946


PHOENIX-4399 Remove explicit abort on RegionServerServices (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cb962f94
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cb962f94
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cb962f94

Branch: refs/heads/5.x-HBase-2.0
Commit: cb962f946c64d89f84824e10a500258ae293ef45
Parents: b78f45c
Author: Ankit Singhal 
Authored: Fri Jun 22 16:00:54 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Jun 22 16:00:54 2018 -0700

--
 .../apache/phoenix/hbase/index/util/IndexManagementUtil.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cb962f94/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index 6c7966f..bcde1a0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -197,11 +197,11 @@ public class IndexManagementUtil {
 public static void rethrowIndexingException(Throwable e) throws 
IOException {
 try {
 throw e;
-} catch (IOException e1) {
+} catch (IOException | FatalIndexBuildingFailureException e1) {
 LOG.info("Rethrowing " + e);
 throw e1;
-} catch (Throwable e1) {
-if (e1 instanceof FatalIndexBuildingFailureException) { throw 
(FatalIndexBuildingFailureException)e1; }
+}
+catch (Throwable e1) {
 LOG.info("Rethrowing " + e1 + " as a " + 
IndexBuildingFailureException.class.getSimpleName());
 throw new IndexBuildingFailureException("Failed to build index for 
unexpected reason!", e1);
 }
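
The multi-catch above lets both known failure types propagate unchanged while anything else gets wrapped. A self-contained sketch of the idiom, with stand-in exception classes (the fatal type must be unrelated to IOException by subclassing, or the multi-catch would not compile):

import java.io.IOException;

class FatalFailure extends RuntimeException {
    FatalFailure(String msg) { super(msg); }
}

class WrappedFailure extends IOException {
    WrappedFailure(String msg, Throwable cause) { super(msg, cause); }
}

public class RethrowDemo {
    static void rethrow(Throwable e) throws IOException {
        try {
            throw e;
        } catch (IOException | FatalFailure known) {
            // Known failure types are rethrown as-is, with precise types preserved.
            throw known;
        } catch (Throwable other) {
            // Anything else is wrapped in a checked failure type.
            throw new WrappedFailure("unexpected failure", other);
        }
    }

    public static void main(String[] args) throws IOException {
        try {
            rethrow(new IllegalStateException("boom"));
        } catch (IOException wrapped) {
            System.out.println("wrapped cause: " + wrapped.getCause());
        }
    }
}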



phoenix git commit: PHOENIX-4399 Remove explicit abort on RegionServerServices (addendum)

2018-06-22 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 aa2f12db9 -> b78f45c03


PHOENIX-4399 Remove explicit abort on RegionServerServices (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b78f45c0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b78f45c0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b78f45c0

Branch: refs/heads/5.x-HBase-2.0
Commit: b78f45c03ed014297fe848b106da86e58f4aa3e0
Parents: aa2f12d
Author: Ankit Singhal 
Authored: Fri Jun 22 15:58:09 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Jun 22 15:58:09 2018 -0700

--
 .../org/apache/phoenix/hbase/index/util/IndexManagementUtil.java   | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b78f45c0/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index 2d65747..6c7966f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.phoenix.hbase.index.ValueGetter;
+import 
org.apache.phoenix.hbase.index.builder.FatalIndexBuildingFailureException;
 import org.apache.phoenix.hbase.index.builder.IndexBuildingFailureException;
 import org.apache.phoenix.hbase.index.covered.Batch;
 import org.apache.phoenix.hbase.index.covered.data.LazyValueGetter;
@@ -200,6 +201,7 @@ public class IndexManagementUtil {
 LOG.info("Rethrowing " + e);
 throw e1;
 } catch (Throwable e1) {
+if (e1 instanceof FatalIndexBuildingFailureException) { throw 
(FatalIndexBuildingFailureException)e1; }
 LOG.info("Rethrowing " + e1 + " as a " + 
IndexBuildingFailureException.class.getSimpleName());
 throw new IndexBuildingFailureException("Failed to build index for 
unexpected reason!", e1);
 }



phoenix git commit: PHOENIX-4788 Shade Joda libraries in phoenix-server to avoid conflict with hbase shell

2018-06-22 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 69b50f6ca -> aa2f12db9


PHOENIX-4788 Shade Joda libraries in phoenix-server to avoid conflict with 
hbase shell


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/aa2f12db
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/aa2f12db
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/aa2f12db

Branch: refs/heads/5.x-HBase-2.0
Commit: aa2f12db9ac7e90fbc26f7906e184131f4917782
Parents: 69b50f6
Author: Ankit Singhal 
Authored: Fri Jun 22 15:57:04 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Jun 22 15:57:04 2018 -0700

--
 phoenix-server/pom.xml | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/aa2f12db/phoenix-server/pom.xml
--
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index e06eba9..2dbbb2b 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -40,6 +40,7 @@
 true
 true
 ${project.basedir}/..
+<shaded.package>org.apache.phoenix.shaded</shaded.package>
   
 
   
@@ -124,11 +125,11 @@
   org.apache.phoenix:phoenix-core
   org.iq80.snappy:snappy
   org.antlr:antlr*
+  joda-time:joda-time
   org.apache.tephra:tephra*
   com.google.code.gson:gson
   org.jruby.joni:joni
   org.jruby.jcodings:jcodings
-  joda-time:joda-time
   org.apache.twill:twill*
   
com.google.inject.extensions:guice-assistedinject
   it.unimi.dsi:fastutil
@@ -157,6 +158,12 @@
   
 
   
+ <relocations>
+  <relocation>
+   <pattern>org.joda</pattern>
+   <shadedPattern>${shaded.package}.org.joda</shadedPattern>
+  </relocation>
+ </relocations>
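
With this relocation the shade plugin rewrites org.joda references inside the phoenix-server jar to the prefixed package, so the bundled Joda classes can no longer clash with the joda-time copy loaded by the hbase shell. A hypothetical probe for the relocated class (illustrative only; it prints the fallback unless the shaded jar is on the classpath):

public class ShadedJodaProbe {
    public static void main(String[] args) {
        // After relocation, the bundled Joda classes live under the
        // org.apache.phoenix.shaded prefix rather than org.joda.
        try {
            Class<?> c = Class.forName("org.apache.phoenix.shaded.org.joda.time.DateTime");
            System.out.println("found relocated class: " + c.getName());
        } catch (ClassNotFoundException e) {
            System.out.println("shaded phoenix-server jar not on classpath");
        }
    }
}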
 
   
 



phoenix git commit: PHOENIX-4787 Upgrade spark version to 2.3.0

2018-06-22 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 56318da62 -> 69b50f6ca


PHOENIX-4787 Upgrade spark version to 2.3.0


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/69b50f6c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/69b50f6c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/69b50f6c

Branch: refs/heads/5.x-HBase-2.0
Commit: 69b50f6caecd1c994412ae887726c1347f19e730
Parents: 56318da
Author: Ankit Singhal 
Authored: Fri Jun 22 11:58:34 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Jun 22 11:58:34 2018 -0700

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/69b50f6c/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 78aff68..054ad78 100644
--- a/pom.xml
+++ b/pom.xml
@@ -100,7 +100,7 @@
 1.11.0
 9.3.19.v20170502
 0.14.0-incubating
-2.0.2
+2.3.0
 2.11.8
 2.11
 2.9.5



phoenix git commit: PHOENIX-4785 Unable to write to table if index is made active during retry

2018-06-21 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 2dd6310ea -> 56318da62


PHOENIX-4785 Unable to write to table if index is made active during retry


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/56318da6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/56318da6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/56318da6

Branch: refs/heads/5.x-HBase-2.0
Commit: 56318da6206df749c91877275f1b8e7dec8d848d
Parents: 2dd6310
Author: Ankit Singhal 
Authored: Thu Jun 21 16:30:16 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 21 16:30:16 2018 -0700

--
 .../end2end/index/MutableIndexFailureIT.java| 125 ++-
 .../MutableIndexFailureWithNamespaceIT.java |  79 
 .../coprocessor/MetaDataEndpointImpl.java   |  30 +
 .../index/PhoenixIndexFailurePolicy.java|  71 ++-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   1 +
 5 files changed, 272 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/56318da6/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 0daf80f..dbda4e8 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -28,10 +28,16 @@ import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -104,10 +110,10 @@ public class MutableIndexFailureIT extends BaseTest {
 private final boolean throwIndexWriteFailure;
 private String schema = generateUniqueName();
 private List exceptions = Lists.newArrayList();
-private static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
-private static final int forwardOverlapMs = 1000;
-private static final int disableTimestampThresholdMs = 1;
-private static final int numRpcRetries = 2;
+protected static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
+protected static final int forwardOverlapMs = 1000;
+protected static final int disableTimestampThresholdMs = 1;
+protected static final int numRpcRetries = 2;
 
 public MutableIndexFailureIT(boolean transactional, boolean localIndex, 
boolean isNamespaceMapped, Boolean disableIndexOnWriteFailure, boolean 
failRebuildTask, Boolean throwIndexWriteFailure) {
 this.transactional = transactional;
@@ -127,6 +133,20 @@ public class MutableIndexFailureIT extends BaseTest {
 
 @BeforeClass
 public static void doSetup() throws Exception {
+Map serverProps = getServerProps();
+Map clientProps = Maps.newHashMapWithExpectedSize(2);
+clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
+NUM_SLAVES_BASE = 4;
+setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(clientProps.entrySet().iterator()));
+indexRebuildTaskRegionEnvironment = getUtility()
+
.getRSForFirstRegionInTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+
.getRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME).get(0).getCoprocessorHost()
+
.findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
+MetaDataRegionObserver.initRebuildIndexConnectionProps(
+indexRebuildTaskRegionEnvironment.getConfiguration());
+}
+
+protected static Map getServerProps(){
 Map serverProps = Maps.newHashMapWithExpectedSize(10);
 serverProps.put("hbase.coprocessor.region.classes", 
FailingRegionObserver.class.getName());
 serverProps.put(HConstants.HBASE_RPC_TIMEOUT_KEY, "1");
@@ -142,19 +162,7 @@ public class MutableIndexFailureIT extends BaseTest {
  * because we want to control it's execution ourselves
  */
 serverProps.put(QueryServices.INDEX_REBUILD_TASK_INITIAL_DELAY, 
Long.toString(Long.MAX_VALUE));
-Map clientPro
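
The setup change above is a template-method refactor: the formerly private statics become protected and the server properties move behind a getServerProps() hook, so the new MutableIndexFailureWithNamespaceIT can reuse the base configuration and layer its own keys on top. A simplified sketch of that shape, with the JUnit wiring omitted and stand-in property values:

import java.util.HashMap;
import java.util.Map;

public class BaseFailureConfig {
    // Overridable hook: a subclass hides this static method to extend the map.
    protected static Map<String, String> getServerProps() {
        Map<String, String> props = new HashMap<>();
        props.put("hbase.rpc.timeout", "10000");
        return props;
    }

    public static void main(String[] args) {
        System.out.println(NamespaceFailureConfig.getServerProps());
    }
}

class NamespaceFailureConfig extends BaseFailureConfig {
    protected static Map<String, String> getServerProps() {
        // Start from the base configuration, then add subclass-specific keys.
        Map<String, String> props = BaseFailureConfig.getServerProps();
        props.put("phoenix.schema.isNamespaceMappingEnabled", "true");
        return props;
    }
}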

phoenix git commit: PHOENIX-4785 Unable to write to table if index is made active during retry

2018-06-21 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 22ea19cee -> fe06cacc1


PHOENIX-4785 Unable to write to table if index is made active during retry


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fe06cacc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fe06cacc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fe06cacc

Branch: refs/heads/4.x-HBase-0.98
Commit: fe06cacc1a715f094fbf98e6d42bd9abc5053037
Parents: 22ea19c
Author: Ankit Singhal 
Authored: Thu Jun 21 16:17:25 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 21 16:17:25 2018 -0700

--
 .../end2end/index/MutableIndexFailureIT.java| 128 ++-
 .../MutableIndexFailureWithNamespaceIT.java |  80 
 .../coprocessor/MetaDataEndpointImpl.java   |  30 +
 .../index/PhoenixIndexFailurePolicy.java|  71 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   2 +-
 5 files changed, 276 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fe06cacc/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 715e37f..aac20ac 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -28,10 +28,16 @@ import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -104,10 +110,10 @@ public class MutableIndexFailureIT extends BaseTest {
 private final boolean throwIndexWriteFailure;
 private String schema = generateUniqueName();
 private List exceptions = Lists.newArrayList();
-private static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
-private static final int forwardOverlapMs = 1000;
-private static final int disableTimestampThresholdMs = 1;
-private static final int numRpcRetries = 2;
+protected static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
+protected static final int forwardOverlapMs = 1000;
+protected static final int disableTimestampThresholdMs = 1;
+protected static final int numRpcRetries = 2;
 
 public MutableIndexFailureIT(boolean transactional, boolean localIndex, 
boolean isNamespaceMapped, Boolean disableIndexOnWriteFailure, boolean 
failRebuildTask, Boolean throwIndexWriteFailure) {
 this.transactional = transactional;
@@ -127,6 +133,23 @@ public class MutableIndexFailureIT extends BaseTest {
 
 @BeforeClass
 public static void doSetup() throws Exception {
+Map serverProps = getServerProps();
+Map clientProps = Maps.newHashMapWithExpectedSize(2);
+clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
+NUM_SLAVES_BASE = 4;
+setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(clientProps.entrySet().iterator()));
+indexRebuildTaskRegionEnvironment =
+(RegionCoprocessorEnvironment) getUtility()
+.getRSForFirstRegionInTable(
+
PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+
.getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+.get(0).getCoprocessorHost()
+
.findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
+MetaDataRegionObserver.initRebuildIndexConnectionProps(
+indexRebuildTaskRegionEnvironment.getConfiguration());
+}
+
+protected static Map getServerProps(){
 Map serverProps = Maps.newHashMapWithExpectedSize(10);
 serverProps.put("hbase.coprocessor.region.classes", 
FailingRegionObserver.class.getName());
 serverProps.put(HConstants.HBASE_RPC_TIMEOUT_KEY, "1");
@@ -142,19 +165,7 @@ public class MutableIndexFailureIT extends BaseTest {
  * because we want to control it's execution ourselves

phoenix git commit: PHOENIX-4785 Unable to write to table if index is made active during retry

2018-06-21 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 d8c5112a2 -> aa3ee877d


PHOENIX-4785 Unable to write to table if index is made active during retry


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/aa3ee877
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/aa3ee877
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/aa3ee877

Branch: refs/heads/4.x-HBase-1.1
Commit: aa3ee877d269d60a58dacc1c0f64f1ce82a17959
Parents: d8c5112
Author: Ankit Singhal 
Authored: Thu Jun 21 16:15:05 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 21 16:15:05 2018 -0700

--
 .../end2end/index/MutableIndexFailureIT.java| 128 ++-
 .../MutableIndexFailureWithNamespaceIT.java |  80 
 .../coprocessor/MetaDataEndpointImpl.java   |  30 +
 .../index/PhoenixIndexFailurePolicy.java|  71 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   2 +-
 5 files changed, 276 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/aa3ee877/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 715e37f..aac20ac 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -28,10 +28,16 @@ import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -104,10 +110,10 @@ public class MutableIndexFailureIT extends BaseTest {
 private final boolean throwIndexWriteFailure;
 private String schema = generateUniqueName();
 private List exceptions = Lists.newArrayList();
-private static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
-private static final int forwardOverlapMs = 1000;
-private static final int disableTimestampThresholdMs = 1;
-private static final int numRpcRetries = 2;
+protected static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
+protected static final int forwardOverlapMs = 1000;
+protected static final int disableTimestampThresholdMs = 1;
+protected static final int numRpcRetries = 2;
 
 public MutableIndexFailureIT(boolean transactional, boolean localIndex, 
boolean isNamespaceMapped, Boolean disableIndexOnWriteFailure, boolean 
failRebuildTask, Boolean throwIndexWriteFailure) {
 this.transactional = transactional;
@@ -127,6 +133,23 @@ public class MutableIndexFailureIT extends BaseTest {
 
 @BeforeClass
 public static void doSetup() throws Exception {
+Map serverProps = getServerProps();
+Map clientProps = Maps.newHashMapWithExpectedSize(2);
+clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
+NUM_SLAVES_BASE = 4;
+setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(clientProps.entrySet().iterator()));
+indexRebuildTaskRegionEnvironment =
+(RegionCoprocessorEnvironment) getUtility()
+.getRSForFirstRegionInTable(
+
PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+
.getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+.get(0).getCoprocessorHost()
+
.findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
+MetaDataRegionObserver.initRebuildIndexConnectionProps(
+indexRebuildTaskRegionEnvironment.getConfiguration());
+}
+
+protected static Map getServerProps(){
 Map serverProps = Maps.newHashMapWithExpectedSize(10);
 serverProps.put("hbase.coprocessor.region.classes", 
FailingRegionObserver.class.getName());
 serverProps.put(HConstants.HBASE_RPC_TIMEOUT_KEY, "1");
@@ -142,19 +165,7 @@ public class MutableIndexFailureIT extends BaseTest {
  * because we want to control it's execution ourselves

phoenix git commit: PHOENIX-4785 Unable to write to table if index is made active during retry

2018-06-21 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 328656fc8 -> d41173ce2


PHOENIX-4785 Unable to write to table if index is made active during retry


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d41173ce
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d41173ce
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d41173ce

Branch: refs/heads/4.x-HBase-1.2
Commit: d41173ce2880e93e5475118e0792dd457a16716f
Parents: 328656f
Author: Ankit Singhal 
Authored: Thu Jun 21 16:13:50 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 21 16:13:50 2018 -0700

--
 .../end2end/index/MutableIndexFailureIT.java| 128 ++-
 .../MutableIndexFailureWithNamespaceIT.java |  80 
 .../coprocessor/MetaDataEndpointImpl.java   |  30 +
 .../index/PhoenixIndexFailurePolicy.java|  71 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   2 +-
 5 files changed, 276 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d41173ce/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 715e37f..aac20ac 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -28,10 +28,16 @@ import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -104,10 +110,10 @@ public class MutableIndexFailureIT extends BaseTest {
 private final boolean throwIndexWriteFailure;
 private String schema = generateUniqueName();
 private List exceptions = Lists.newArrayList();
-private static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
-private static final int forwardOverlapMs = 1000;
-private static final int disableTimestampThresholdMs = 1;
-private static final int numRpcRetries = 2;
+protected static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
+protected static final int forwardOverlapMs = 1000;
+protected static final int disableTimestampThresholdMs = 1;
+protected static final int numRpcRetries = 2;
 
 public MutableIndexFailureIT(boolean transactional, boolean localIndex, 
boolean isNamespaceMapped, Boolean disableIndexOnWriteFailure, boolean 
failRebuildTask, Boolean throwIndexWriteFailure) {
 this.transactional = transactional;
@@ -127,6 +133,23 @@ public class MutableIndexFailureIT extends BaseTest {
 
 @BeforeClass
 public static void doSetup() throws Exception {
+Map serverProps = getServerProps();
+Map clientProps = Maps.newHashMapWithExpectedSize(2);
+clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
+NUM_SLAVES_BASE = 4;
+setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(clientProps.entrySet().iterator()));
+indexRebuildTaskRegionEnvironment =
+(RegionCoprocessorEnvironment) getUtility()
+.getRSForFirstRegionInTable(
+
PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+
.getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+.get(0).getCoprocessorHost()
+
.findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
+MetaDataRegionObserver.initRebuildIndexConnectionProps(
+indexRebuildTaskRegionEnvironment.getConfiguration());
+}
+
+protected static Map getServerProps(){
 Map serverProps = Maps.newHashMapWithExpectedSize(10);
 serverProps.put("hbase.coprocessor.region.classes", 
FailingRegionObserver.class.getName());
 serverProps.put(HConstants.HBASE_RPC_TIMEOUT_KEY, "1");
@@ -142,19 +165,7 @@ public class MutableIndexFailureIT extends BaseTest {
  * because we want to control it's execution ourselves

phoenix git commit: PHOENIX-4785 Unable to write to table if index is made active during retry

2018-06-21 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 a9170b232 -> 5771eb213


PHOENIX-4785 Unable to write to table if index is made active during retry


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5771eb21
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5771eb21
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5771eb21

Branch: refs/heads/4.x-HBase-1.3
Commit: 5771eb213feca0e50c9f3542b5118e44b7816f3e
Parents: a9170b2
Author: Ankit Singhal 
Authored: Thu Jun 21 16:11:41 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 21 16:11:41 2018 -0700

--
 .../end2end/index/MutableIndexFailureIT.java| 128 ++-
 .../MutableIndexFailureWithNamespaceIT.java |  80 
 .../coprocessor/MetaDataEndpointImpl.java   |  30 +
 .../index/PhoenixIndexFailurePolicy.java|  71 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   2 +-
 5 files changed, 276 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5771eb21/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index dfbaf3f..8f88513 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -28,10 +28,16 @@ import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -104,10 +110,10 @@ public class MutableIndexFailureIT extends BaseTest {
 private final boolean throwIndexWriteFailure;
 private String schema = generateUniqueName();
 private List exceptions = Lists.newArrayList();
-private static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
-private static final int forwardOverlapMs = 1000;
-private static final int disableTimestampThresholdMs = 1;
-private static final int numRpcRetries = 2;
+protected static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
+protected static final int forwardOverlapMs = 1000;
+protected static final int disableTimestampThresholdMs = 1;
+protected static final int numRpcRetries = 2;
 
 public MutableIndexFailureIT(boolean transactional, boolean localIndex, 
boolean isNamespaceMapped, Boolean disableIndexOnWriteFailure, boolean 
failRebuildTask, Boolean throwIndexWriteFailure) {
 this.transactional = transactional;
@@ -127,6 +133,23 @@ public class MutableIndexFailureIT extends BaseTest {
 
 @BeforeClass
 public static void doSetup() throws Exception {
+Map serverProps = getServerProps();
+Map clientProps = Maps.newHashMapWithExpectedSize(2);
+clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
+NUM_SLAVES_BASE = 4;
+setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(clientProps.entrySet().iterator()));
+indexRebuildTaskRegionEnvironment =
+(RegionCoprocessorEnvironment) getUtility()
+.getRSForFirstRegionInTable(
+
PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+
.getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+.get(0).getCoprocessorHost()
+
.findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
+MetaDataRegionObserver.initRebuildIndexConnectionProps(
+indexRebuildTaskRegionEnvironment.getConfiguration());
+}
+
+protected static Map getServerProps(){
 Map serverProps = Maps.newHashMapWithExpectedSize(10);
 serverProps.put("hbase.coprocessor.region.classes", 
FailingRegionObserver.class.getName());
 serverProps.put(HConstants.HBASE_RPC_TIMEOUT_KEY, "1");
@@ -142,19 +165,7 @@ public class MutableIndexFailureIT extends BaseTest {
  * because we want to control it's execution ourselves

phoenix git commit: PHOENIX-4785 Unable to write to table if index is made active during retry

2018-06-21 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/master 9dbe20ac7 -> 6195f8e7b


PHOENIX-4785 Unable to write to table if index is made active during retry


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6195f8e7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6195f8e7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6195f8e7

Branch: refs/heads/master
Commit: 6195f8e7b5efeecd5c736ba0ef121b706c875d8d
Parents: 9dbe20a
Author: Ankit Singhal 
Authored: Thu Jun 21 16:11:02 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 21 16:11:02 2018 -0700

--
 .../end2end/index/MutableIndexFailureIT.java| 128 ++-
 .../MutableIndexFailureWithNamespaceIT.java |  80 
 .../coprocessor/MetaDataEndpointImpl.java   |  30 +
 .../index/PhoenixIndexFailurePolicy.java|  71 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   2 +-
 5 files changed, 276 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6195f8e7/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index dfbaf3f..8f88513 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -28,10 +28,16 @@ import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -104,10 +110,10 @@ public class MutableIndexFailureIT extends BaseTest {
 private final boolean throwIndexWriteFailure;
 private String schema = generateUniqueName();
 private List exceptions = Lists.newArrayList();
-private static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
-private static final int forwardOverlapMs = 1000;
-private static final int disableTimestampThresholdMs = 1;
-private static final int numRpcRetries = 2;
+protected static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
+protected static final int forwardOverlapMs = 1000;
+protected static final int disableTimestampThresholdMs = 1;
+protected static final int numRpcRetries = 2;
 
 public MutableIndexFailureIT(boolean transactional, boolean localIndex, 
boolean isNamespaceMapped, Boolean disableIndexOnWriteFailure, boolean 
failRebuildTask, Boolean throwIndexWriteFailure) {
 this.transactional = transactional;
@@ -127,6 +133,23 @@ public class MutableIndexFailureIT extends BaseTest {
 
 @BeforeClass
 public static void doSetup() throws Exception {
+Map serverProps = getServerProps();
+Map clientProps = Maps.newHashMapWithExpectedSize(2);
+clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
+NUM_SLAVES_BASE = 4;
+setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(clientProps.entrySet().iterator()));
+indexRebuildTaskRegionEnvironment =
+(RegionCoprocessorEnvironment) getUtility()
+.getRSForFirstRegionInTable(
+
PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+
.getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+.get(0).getCoprocessorHost()
+
.findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
+MetaDataRegionObserver.initRebuildIndexConnectionProps(
+indexRebuildTaskRegionEnvironment.getConfiguration());
+}
+
+protected static Map getServerProps(){
 Map serverProps = Maps.newHashMapWithExpectedSize(10);
 serverProps.put("hbase.coprocessor.region.classes", 
FailingRegionObserver.class.getName());
 serverProps.put(HConstants.HBASE_RPC_TIMEOUT_KEY, "1");
@@ -142,19 +165,7 @@ public class MutableIndexFailureIT extends BaseTest {
  * because we want to control it's execution ourselves

phoenix git commit: Revert "PHOENIX-4768 Re-enable testCompactUpdatesStats and testCompactUpdatesStatsWithMinStatsUpdateFreq of StatsCollectorIT"

2018-06-21 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 43da29f47 -> 2dd6310ea


Revert "PHOENIX-4768 Re-enable testCompactUpdatesStats and 
testCompactUpdatesStatsWithMinStatsUpdateFreq of StatsCollectorIT"

This reverts commit 09c017e3cc8a2d1ec4ce27c65a5c76b71de138a0.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2dd6310e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2dd6310e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2dd6310e

Branch: refs/heads/5.x-HBase-2.0
Commit: 2dd6310ea1057f2cb46361817e2b3a3ecb1887e7
Parents: 43da29f
Author: Ankit Singhal 
Authored: Thu Jun 21 14:41:36 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 21 14:41:36 2018 -0700

--
 .../org/apache/phoenix/schema/stats/StatsCollectorIT.java| 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2dd6310e/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
index c2325ae..3af0d09 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
@@ -68,6 +69,7 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -403,11 +405,13 @@ public abstract class StatsCollectorIT extends 
BaseUniqueNamesOwnClusterIT {
 }
 
 @Test
+@Ignore //TODO remove this once  
https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
 public void testCompactUpdatesStats() throws Exception {
 testCompactUpdatesStats(0, fullTableName);
 }
 
 @Test
+@Ignore //TODO remove this once  
https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
 public void testCompactUpdatesStatsWithMinStatsUpdateFreq() throws 
Exception {
 
testCompactUpdatesStats(QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS, 
fullTableName);
 }
@@ -460,7 +464,7 @@ public abstract class StatsCollectorIT extends 
BaseUniqueNamesOwnClusterIT {
 Scan scan = new Scan();
 scan.setRaw(true);
 PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
-try (Table htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(physicalTableName))) {
+try (Table htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
 ResultScanner scanner = htable.getScanner(scan);
 Result result;
 while ((result = scanner.next())!=null) {
@@ -473,7 +477,7 @@ public abstract class StatsCollectorIT extends 
BaseUniqueNamesOwnClusterIT {
 scan = new Scan();
 scan.setRaw(true);
 phxConn = conn.unwrap(PhoenixConnection.class);
-try (Table htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(physicalTableName))) {
+try (Table htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
 ResultScanner scanner = htable.getScanner(scan);
 Result result;
 while ((result = scanner.next())!=null) {



[2/2] phoenix git commit: PHOENIX-4544 Update statistics inconsistent behavior

2018-06-07 Thread ankit
PHOENIX-4544 Update statistics inconsistent behavior


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c3201a26
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c3201a26
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c3201a26

Branch: refs/heads/4.x-HBase-0.98
Commit: c3201a263937a7765877c67c1bd5265516a0a3c9
Parents: dc1f969
Author: Ankit Singhal 
Authored: Thu Jun 7 11:30:06 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 7 11:30:06 2018 -0700

--
 .../StatisticsCollectionRunTrackerIT.java   | 33 +-
 .../UngroupedAggregateRegionObserver.java   |  4 +-
 .../apache/phoenix/schema/MetaDataClient.java   | 10 -
 .../stats/StatisticsCollectionRunTracker.java   | 45 +---
 .../java/org/apache/phoenix/util/ByteUtil.java  | 16 ++-
 5 files changed, 87 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3201a26/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
index 05ef9bd..8f00a0b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
@@ -25,6 +25,8 @@ import static org.junit.Assert.assertTrue;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -60,12 +62,15 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 StatisticsCollectionRunTracker tracker =
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the region wasn't added to the tracker
-assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new 
HashSet(Arrays.asList(Bytes.toBytes("0");
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new 
HashSet(Arrays.asList(Bytes.toBytes("L#0");
 // assert that removing the region from the tracker works
-assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet(Arrays.asList(Bytes.toBytes("0");
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet(Arrays.asList(Bytes.toBytes("L#0");
 runUpdateStats(tableName);
 // assert that after update stats is complete, tracker isn't tracking 
the region any more
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet(Arrays.asList(Bytes.toBytes("0");
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet(Arrays.asList(Bytes.toBytes("L#0");;
 }
 
 @Test
@@ -102,26 +107,31 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 HRegionInfo regionInfo = createTableAndGetRegion(tableName);
 // simulate stats collection via major compaction by marking the 
region as compacting in the tracker
 markRegionAsCompacting(regionInfo);
-Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT, runUpdateStats(tableName));
+// there will be no update for local index and a table , so checking 2 
* COMPACTION_UPDATE_STATS_ROW_COUNT
+Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT * 2, runUpdateStats(tableName));
 StatisticsCollectionRunTracker tracker =
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the tracker state was cleared.
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+HashSet familyMap = new 
HashSet(Arrays.asList(Bytes.toBytes("0")));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, 
familyMap));
 }
 
 @Test
 public void testUpdateStatsPreventsAnotherUpdateStatsFromRunning() throws 
Exception {
 String tableName = fullTableName;
 HRegionInfo regionInfo = createTableAndGetRegion(tableName);
-markRunningUpda
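
The tracker API change above moves from whole-region bookkeeping to (region, column family) pairs, so a stats run over the data family no longer blocks one over the local-index family (the "L#0" entries). A hedged, simplified sketch of that bookkeeping, using String keys in place of HRegionInfo and byte[] families:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class StatsRunTracker {
    private final Set<String> inFlight = ConcurrentHashMap.newKeySet();

    // Returns true only when this (region, family) pair was not already running.
    public boolean add(String region, String family) {
        return inFlight.add(region + "/" + family);
    }

    public boolean remove(String region, String family) {
        return inFlight.remove(region + "/" + family);
    }

    public static void main(String[] args) {
        StatsRunTracker t = new StatsRunTracker();
        System.out.println(t.add("r1", "0"));     // true: claimed
        System.out.println(t.add("r1", "0"));     // false: already in flight
        System.out.println(t.add("r1", "L#0"));   // true: local-index family tracked separately
        System.out.println(t.remove("r1", "0"));  // true: released
    }
}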

[1/2] phoenix git commit: PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured

2018-06-07 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 9d4912d72 -> c3201a263


PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/dc1f9691
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/dc1f9691
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/dc1f9691

Branch: refs/heads/4.x-HBase-0.98
Commit: dc1f96915a12c643223ff502d161e44d889c0f71
Parents: 9d4912d
Author: Ankit Singhal 
Authored: Thu Jun 7 11:26:46 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 7 11:26:46 2018 -0700

--
 .../org/apache/phoenix/end2end/SequenceIT.java  |  57 +-
 .../phoenix/end2end/SequencePointInTimeIT.java  | 112 +++
 .../query/ConnectionQueryServicesImpl.java  |  13 ++-
 .../query/ConnectionlessQueryServicesImpl.java  |   7 +-
 .../apache/phoenix/query/QueryConstants.java|   2 +
 .../org/apache/phoenix/schema/Sequence.java |   6 +-
 6 files changed, 139 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/dc1f9691/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
index 4cc9628..b76cc4e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
@@ -18,6 +18,8 @@
 
 package org.apache.phoenix.end2end;
 
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
 import static 
org.apache.phoenix.query.QueryServicesTestImpl.DEFAULT_SEQUENCE_CACHE_SIZE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
@@ -38,6 +40,7 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesTestImpl;
 import org.apache.phoenix.schema.SchemaNotFoundException;
 import org.apache.phoenix.schema.SequenceAlreadyExistsException;
 import org.apache.phoenix.schema.SequenceNotFoundException;
@@ -202,6 +205,8 @@ public class SequenceIT extends ParallelStatsDisabledIT {
 String schemaName = getSchemaName(sequenceName);
 
 conn.createStatement().execute("CREATE SEQUENCE " + sequenceName + " 
START WITH 2 INCREMENT BY 4");
+int bucketNum = PhoenixRuntime.getTableNoCache(conn, 
SYSTEM_CATALOG_SCHEMA + "." + TYPE_SEQUENCE).getBucketNum();
+assertEquals("Salt bucket for SYSTEM.SEQUENCE should be test 
default",bucketNum , QueryServicesTestImpl.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
 String query = "SELECT sequence_schema, sequence_name, current_value, 
increment_by FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='" + 
sequenceNameWithoutSchema + "'";
 ResultSet rs = conn.prepareStatement(query).executeQuery();
 assertTrue(rs.next());
@@ -1406,56 +1411,4 @@ public class SequenceIT extends ParallelStatsDisabledIT {
return tableName.substring(tableName.indexOf(".") + 1, 
tableName.length());
 }
 
-@Test
-public void testPointInTimeSequence() throws Exception {
-String seqName = generateSequenceNameWithSchema(); 
-Properties scnProps = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-Connection beforeSeqConn = DriverManager.getConnection(getUrl(), 
scnProps);
-
-ResultSet rs;
-Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-Connection conn = DriverManager.getConnection(getUrl(), props);
-conn.createStatement().execute("CREATE SEQUENCE " + seqName + "");
-
-try {
-beforeSeqConn.createStatement().executeQuery("SELECT next value 
for " + seqName);
-fail();
-} catch (SequenceNotFoundException e) {
-beforeSeqConn.close();
-}
-
-scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-Connection afterSeqConn = DriverManager.getConnection(getUrl(), 
scnProps);
-
-rs = conn.createStatement().executeQuery("SELECT next value for " + 
seqName);
-asser
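
For context on why the SYSTEM.SEQUENCE salt-bucket setting matters: salting prepends a hash-derived byte to each row key so that monotonically increasing sequence keys spread across N buckets instead of hot-spotting a single region. An illustrative computation, assuming a simple hash rather than Phoenix's exact salting function:

import java.nio.charset.StandardCharsets;

public class SaltBucketDemo {
    // Derive a salt byte in [0, buckets) from the remaining key bytes.
    static byte saltByte(byte[] key, int buckets) {
        int h = 0;
        for (byte b : key) {
            h = 31 * h + b;
        }
        return (byte) Math.floorMod(h, buckets);
    }

    public static void main(String[] args) {
        int buckets = 8;
        for (long seq = 1; seq <= 4; seq++) {
            byte[] key = Long.toString(seq).getBytes(StandardCharsets.UTF_8);
            System.out.println("sequence " + seq + " -> bucket " + saltByte(key, buckets));
        }
    }
}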

[1/2] phoenix git commit: PHOENIX-4544 Update statistics inconsistent behavior

2018-06-07 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 6f824e7fd -> 9d90af77b


PHOENIX-4544 Update statistics inconsistent behavior


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c9922c1f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c9922c1f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c9922c1f

Branch: refs/heads/4.x-HBase-1.1
Commit: c9922c1f1e0749463db50049d135bfcc9c705031
Parents: 6f824e7
Author: Ankit Singhal 
Authored: Thu Jun 7 11:24:12 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 7 11:24:12 2018 -0700

--
 .../StatisticsCollectionRunTrackerIT.java   | 32 --
 .../UngroupedAggregateRegionObserver.java   |  4 +-
 .../apache/phoenix/schema/MetaDataClient.java   | 10 -
 .../stats/StatisticsCollectionRunTracker.java   | 45 +---
 .../java/org/apache/phoenix/util/ByteUtil.java  | 16 ++-
 5 files changed, 85 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9922c1f/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
index cf475f9..a643383 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
@@ -25,6 +25,8 @@ import static org.junit.Assert.assertTrue;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -61,12 +63,15 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 StatisticsCollectionRunTracker tracker =
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the region wasn't added to the tracker
-assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new 
HashSet(Arrays.asList(Bytes.toBytes("0");
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new 
HashSet(Arrays.asList(Bytes.toBytes("L#0");
 // assert that removing the region from the tracker works
-assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet(Arrays.asList(Bytes.toBytes("0");
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet(Arrays.asList(Bytes.toBytes("L#0");
 runUpdateStats(tableName);
 // assert that after update stats is complete, tracker isn't tracking 
the region any more
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet(Arrays.asList(Bytes.toBytes("0");
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet(Arrays.asList(Bytes.toBytes("L#0");;
 }
 
 @Test
@@ -103,25 +108,27 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 HRegionInfo regionInfo = createTableAndGetRegion(tableName);
 // simulate stats collection via major compaction by marking the 
region as compacting in the tracker
 markRegionAsCompacting(regionInfo);
-Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT, runUpdateStats(tableName));
-StatisticsCollectionRunTracker tracker =
+// there will be no update for local index and a table , so checking 2 
* COMPACTION_UPDATE_STATS_ROW_COUNT
+Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT * 2, runUpdateStats(tableName));
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the tracker state was cleared.
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+HashSet familyMap = new 
HashSet(Arrays.asList(Bytes.toBytes("0")));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, 
familyMap));
 }
 
 @Test
 public void testUpdateStatsPreventsAnotherUpdateStatsFromRunning() throws 
Exception {
 String tableName = fullTableName;
 HRegionInfo regionInf

[2/2] phoenix git commit: PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured

2018-06-07 Thread ankit
PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9d90af77
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9d90af77
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9d90af77

Branch: refs/heads/4.x-HBase-1.1
Commit: 9d90af77bc0af66ddca2c68f9878d7c2155b6f1a
Parents: c9922c1
Author: Ankit Singhal 
Authored: Thu Jun 7 11:24:25 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 7 11:24:25 2018 -0700

--
 .../org/apache/phoenix/end2end/SequenceIT.java  |  57 +-
 .../phoenix/end2end/SequencePointInTimeIT.java  | 112 +++
 .../query/ConnectionQueryServicesImpl.java  |  13 ++-
 .../query/ConnectionlessQueryServicesImpl.java  |   7 +-
 .../apache/phoenix/query/QueryConstants.java|   2 +
 .../org/apache/phoenix/schema/Sequence.java |   6 +-
 6 files changed, 139 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9d90af77/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
index 4cc9628..b76cc4e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
@@ -18,6 +18,8 @@
 
 package org.apache.phoenix.end2end;
 
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
 import static 
org.apache.phoenix.query.QueryServicesTestImpl.DEFAULT_SEQUENCE_CACHE_SIZE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
@@ -38,6 +40,7 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesTestImpl;
 import org.apache.phoenix.schema.SchemaNotFoundException;
 import org.apache.phoenix.schema.SequenceAlreadyExistsException;
 import org.apache.phoenix.schema.SequenceNotFoundException;
@@ -202,6 +205,8 @@ public class SequenceIT extends ParallelStatsDisabledIT {
 String schemaName = getSchemaName(sequenceName);
 
 conn.createStatement().execute("CREATE SEQUENCE " + sequenceName + " 
START WITH 2 INCREMENT BY 4");
+int bucketNum = PhoenixRuntime.getTableNoCache(conn, 
SYSTEM_CATALOG_SCHEMA + "." + TYPE_SEQUENCE).getBucketNum();
+assertEquals("Salt bucket for SYSTEM.SEQUENCE should be test 
default",bucketNum , QueryServicesTestImpl.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
 String query = "SELECT sequence_schema, sequence_name, current_value, 
increment_by FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='" + 
sequenceNameWithoutSchema + "'";
 ResultSet rs = conn.prepareStatement(query).executeQuery();
 assertTrue(rs.next());
@@ -1406,56 +1411,4 @@ public class SequenceIT extends ParallelStatsDisabledIT {
return tableName.substring(tableName.indexOf(".") + 1, 
tableName.length());
 }
 
-@Test
-public void testPointInTimeSequence() throws Exception {
-String seqName = generateSequenceNameWithSchema(); 
-Properties scnProps = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-Connection beforeSeqConn = DriverManager.getConnection(getUrl(), 
scnProps);
-
-ResultSet rs;
-Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-Connection conn = DriverManager.getConnection(getUrl(), props);
-conn.createStatement().execute("CREATE SEQUENCE " + seqName + "");
-
-try {
-beforeSeqConn.createStatement().executeQuery("SELECT next value 
for " + seqName);
-fail();
-} catch (SequenceNotFoundException e) {
-beforeSeqConn.close();
-}
-
-scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-Connection afterSeqConn = DriverManager.getConnection(getUrl(), 
scnProps);
-
-rs = conn.createStatement().executeQuery("SELECT next value for " + 
seqName);
-assertTrue(rs.next());
-assertEquals(1, rs.getInt(1));
rs = conn.createStatement().executeQuery("
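
The removed testPointInTimeSequence (and the new SequencePointInTimeIT that
replaces it) exercises Phoenix's CURRENT_SCN connection property. A minimal
sketch of opening such a point-in-time connection, assuming only a
hypothetical cluster URL jdbcUrl:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;
    import org.apache.phoenix.util.PhoenixRuntime;

    public class ScnConnectionSketch {
        // jdbcUrl is hypothetical, e.g. "jdbc:phoenix:zookeeper-host".
        static Connection openAt(String jdbcUrl, long scn) throws Exception {
            Properties props = new Properties();
            // Pin all reads on this connection to the given server timestamp;
            // objects created after the SCN (such as a newer sequence) are
            // invisible and yield SequenceNotFoundException, as asserted above.
            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn));
            return DriverManager.getConnection(jdbcUrl, props);
        }
    }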

[2/2] phoenix git commit: PHOENIX-4544 Update statistics inconsistent behavior

2018-06-07 Thread ankit
PHOENIX-4544 Update statistics inconsistent behavior


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0bff15f1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0bff15f1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0bff15f1

Branch: refs/heads/4.x-HBase-1.2
Commit: 0bff15f1760c2485dc75aa7934dccc854d58c620
Parents: 0f8ed35
Author: Ankit Singhal 
Authored: Thu Jun 7 11:23:56 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 7 11:23:56 2018 -0700

--
 .../StatisticsCollectionRunTrackerIT.java   | 32 --
 .../UngroupedAggregateRegionObserver.java   |  4 +-
 .../apache/phoenix/schema/MetaDataClient.java   | 10 -
 .../stats/StatisticsCollectionRunTracker.java   | 45 +---
 .../java/org/apache/phoenix/util/ByteUtil.java  | 16 ++-
 5 files changed, 85 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0bff15f1/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
index cf475f9..a643383 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
@@ -25,6 +25,8 @@ import static org.junit.Assert.assertTrue;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -61,12 +63,15 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 StatisticsCollectionRunTracker tracker =
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the region wasn't added to the tracker
-assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 // assert that removing the region from the tracker works
-assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 runUpdateStats(tableName);
 // assert that after update stats is complete, tracker isn't tracking 
the region any more
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 }
 
 @Test
@@ -103,25 +108,27 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 HRegionInfo regionInfo = createTableAndGetRegion(tableName);
 // simulate stats collection via major compaction by marking the 
region as compacting in the tracker
 markRegionAsCompacting(regionInfo);
-Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT, runUpdateStats(tableName));
-StatisticsCollectionRunTracker tracker =
+// there will be no update for local index and a table, so checking 2 
* COMPACTION_UPDATE_STATS_ROW_COUNT
+Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT * 2, runUpdateStats(tableName));
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the tracker state was cleared.
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+HashSet<byte[]> familyMap = new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, 
familyMap));
 }
 
 @Test
 public void testUpdateStatsPreventsAnotherUpdateStatsFromRunning() throws 
Exception {
 String tableName = fullTableName;
 HRegionInfo regionInfo = createTableAndGetRegion(tableName);
-markRunningUpdateStats(regionInfo);
-   
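
The hunk above changes StatisticsCollectionRunTracker to key in-flight
UPDATE STATISTICS runs by region plus column-family set. A usage sketch
under those 4.x signatures; the guard pattern is an illustration, not the
coprocessor's actual call site:

    import java.util.Arrays;
    import java.util.HashSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker;

    public class TrackerGuardSketch {
        static void collectOnce(HRegionInfo regionInfo) {
            StatisticsCollectionRunTracker tracker =
                    StatisticsCollectionRunTracker.getInstance(new Configuration());
            HashSet<byte[]> families =
                    new HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")));
            // addUpdateStatsCommandRegion returns false when a run for the same
            // (region, families) pair is already in flight, so duplicates skip.
            if (tracker.addUpdateStatsCommandRegion(regionInfo, families)) {
                try {
                    // ... collect statistics for these families ...
                } finally {
                    tracker.removeUpdateStatsCommandRegion(regionInfo, families);
                }
            }
        }
    }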

[1/2] phoenix git commit: PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured

2018-06-07 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 00eb60a58 -> 0bff15f17


PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0f8ed355
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0f8ed355
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0f8ed355

Branch: refs/heads/4.x-HBase-1.2
Commit: 0f8ed355d8f2c20a69c2a04ed80c96f5124a9acc
Parents: 00eb60a
Author: Ankit Singhal 
Authored: Thu Jun 7 11:23:44 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 7 11:23:44 2018 -0700

--
 .../org/apache/phoenix/end2end/SequenceIT.java  |  57 +-
 .../phoenix/end2end/SequencePointInTimeIT.java  | 112 +++
 .../query/ConnectionQueryServicesImpl.java  |  13 ++-
 .../query/ConnectionlessQueryServicesImpl.java  |   7 +-
 .../apache/phoenix/query/QueryConstants.java|   2 +
 .../org/apache/phoenix/schema/Sequence.java |   6 +-
 6 files changed, 139 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f8ed355/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
index 4cc9628..b76cc4e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
@@ -18,6 +18,8 @@
 
 package org.apache.phoenix.end2end;
 
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
 import static 
org.apache.phoenix.query.QueryServicesTestImpl.DEFAULT_SEQUENCE_CACHE_SIZE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
@@ -38,6 +40,7 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesTestImpl;
 import org.apache.phoenix.schema.SchemaNotFoundException;
 import org.apache.phoenix.schema.SequenceAlreadyExistsException;
 import org.apache.phoenix.schema.SequenceNotFoundException;
@@ -202,6 +205,8 @@ public class SequenceIT extends ParallelStatsDisabledIT {
 String schemaName = getSchemaName(sequenceName);
 
 conn.createStatement().execute("CREATE SEQUENCE " + sequenceName + " 
START WITH 2 INCREMENT BY 4");
+int bucketNum = PhoenixRuntime.getTableNoCache(conn, 
SYSTEM_CATALOG_SCHEMA + "." + TYPE_SEQUENCE).getBucketNum();
+assertEquals("Salt bucket for SYSTEM.SEQUENCE should be test 
default",bucketNum , QueryServicesTestImpl.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
 String query = "SELECT sequence_schema, sequence_name, current_value, 
increment_by FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='" + 
sequenceNameWithoutSchema + "'";
 ResultSet rs = conn.prepareStatement(query).executeQuery();
 assertTrue(rs.next());
@@ -1406,56 +1411,4 @@ public class SequenceIT extends ParallelStatsDisabledIT {
return tableName.substring(tableName.indexOf(".") + 1, 
tableName.length());
 }
 
-@Test
-public void testPointInTimeSequence() throws Exception {
-String seqName = generateSequenceNameWithSchema(); 
-Properties scnProps = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-Connection beforeSeqConn = DriverManager.getConnection(getUrl(), 
scnProps);
-
-ResultSet rs;
-Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-Connection conn = DriverManager.getConnection(getUrl(), props);
-conn.createStatement().execute("CREATE SEQUENCE " + seqName + "");
-
-try {
-beforeSeqConn.createStatement().executeQuery("SELECT next value 
for " + seqName);
-fail();
-} catch (SequenceNotFoundException e) {
-beforeSeqConn.close();
-}
-
-scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-Connection afterSeqConn = DriverManager.getConnection(getUrl(), 
scnProps);
-
-rs = conn.createStatement().executeQuery("SELECT next value for " + 
seqName);
-asser
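
The assertion added in this hunk reads the salt bucket count back through
PhoenixRuntime. A standalone sketch of the same check against a live
cluster, assuming an open Phoenix connection; a null result would mean
SYSTEM.SEQUENCE was created unsalted:

    import java.sql.Connection;
    import org.apache.phoenix.schema.PTable;
    import org.apache.phoenix.util.PhoenixRuntime;

    public class SequenceSaltSketch {
        static Integer sequenceSaltBuckets(Connection conn) throws Exception {
            // Bypass the client cache so the metadata reflects what the server
            // actually created, i.e. whether phoenix.sequence.saltBuckets was
            // honoured at table creation time.
            PTable seqTable = PhoenixRuntime.getTableNoCache(conn, "SYSTEM.SEQUENCE");
            return seqTable.getBucketNum();
        }
    }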

[1/2] phoenix git commit: PHOENIX-4544 Update statistics inconsistent behavior

2018-06-07 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 4e7d88b17 -> 7a7172a4b


PHOENIX-4544 Update statistics inconsistent behavior


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fef7aa9e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fef7aa9e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fef7aa9e

Branch: refs/heads/4.x-HBase-1.3
Commit: fef7aa9e1d8281753e39d2a7e5b4932ebf102cc7
Parents: 4e7d88b
Author: Ankit Singhal 
Authored: Thu Jun 7 11:22:56 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 7 11:22:56 2018 -0700

--
 .../StatisticsCollectionRunTrackerIT.java   | 32 --
 .../UngroupedAggregateRegionObserver.java   |  4 +-
 .../apache/phoenix/schema/MetaDataClient.java   | 10 -
 .../stats/StatisticsCollectionRunTracker.java   | 45 +---
 .../java/org/apache/phoenix/util/ByteUtil.java  | 16 ++-
 5 files changed, 85 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fef7aa9e/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
index cf475f9..a643383 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
@@ -25,6 +25,8 @@ import static org.junit.Assert.assertTrue;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -61,12 +63,15 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 StatisticsCollectionRunTracker tracker =
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the region wasn't added to the tracker
-assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 // assert that removing the region from the tracker works
-assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 runUpdateStats(tableName);
 // assert that after update stats is complete, tracker isn't tracking 
the region any more
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 }
 
 @Test
@@ -103,25 +108,27 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 HRegionInfo regionInfo = createTableAndGetRegion(tableName);
 // simulate stats collection via major compaction by marking the 
region as compacting in the tracker
 markRegionAsCompacting(regionInfo);
-Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT, runUpdateStats(tableName));
-StatisticsCollectionRunTracker tracker =
+// there will be no update for local index and a table, so checking 2 
* COMPACTION_UPDATE_STATS_ROW_COUNT
+Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT * 2, runUpdateStats(tableName));
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the tracker state was cleared.
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+HashSet<byte[]> familyMap = new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, 
familyMap));
 }
 
 @Test
 public void testUpdateStatsPreventsAnotherUpdateStatsFromRunning() throws 
Exception {
 String tableName = fullTableName;
 HRegionInfo regionInf

[2/2] phoenix git commit: PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured

2018-06-07 Thread ankit
PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7a7172a4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7a7172a4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7a7172a4

Branch: refs/heads/4.x-HBase-1.3
Commit: 7a7172a4b8d2bce115ad7ceb11888379456ada83
Parents: fef7aa9
Author: Ankit Singhal 
Authored: Thu Jun 7 11:23:16 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 7 11:23:16 2018 -0700

--
 .../org/apache/phoenix/end2end/SequenceIT.java  |  57 +-
 .../phoenix/end2end/SequencePointInTimeIT.java  | 112 +++
 .../query/ConnectionQueryServicesImpl.java  |  13 ++-
 .../query/ConnectionlessQueryServicesImpl.java  |   7 +-
 .../apache/phoenix/query/QueryConstants.java|   2 +
 .../org/apache/phoenix/schema/Sequence.java |   6 +-
 6 files changed, 139 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a7172a4/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
index 4cc9628..b76cc4e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
@@ -18,6 +18,8 @@
 
 package org.apache.phoenix.end2end;
 
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
 import static 
org.apache.phoenix.query.QueryServicesTestImpl.DEFAULT_SEQUENCE_CACHE_SIZE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
@@ -38,6 +40,7 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesTestImpl;
 import org.apache.phoenix.schema.SchemaNotFoundException;
 import org.apache.phoenix.schema.SequenceAlreadyExistsException;
 import org.apache.phoenix.schema.SequenceNotFoundException;
@@ -202,6 +205,8 @@ public class SequenceIT extends ParallelStatsDisabledIT {
 String schemaName = getSchemaName(sequenceName);
 
 conn.createStatement().execute("CREATE SEQUENCE " + sequenceName + " 
START WITH 2 INCREMENT BY 4");
+int bucketNum = PhoenixRuntime.getTableNoCache(conn, 
SYSTEM_CATALOG_SCHEMA + "." + TYPE_SEQUENCE).getBucketNum();
+assertEquals("Salt bucket for SYSTEM.SEQUENCE should be test 
default",bucketNum , QueryServicesTestImpl.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
 String query = "SELECT sequence_schema, sequence_name, current_value, 
increment_by FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='" + 
sequenceNameWithoutSchema + "'";
 ResultSet rs = conn.prepareStatement(query).executeQuery();
 assertTrue(rs.next());
@@ -1406,56 +1411,4 @@ public class SequenceIT extends ParallelStatsDisabledIT {
return tableName.substring(tableName.indexOf(".") + 1, 
tableName.length());
 }
 
-@Test
-public void testPointInTimeSequence() throws Exception {
-String seqName = generateSequenceNameWithSchema(); 
-Properties scnProps = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-Connection beforeSeqConn = DriverManager.getConnection(getUrl(), 
scnProps);
-
-ResultSet rs;
-Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-Connection conn = DriverManager.getConnection(getUrl(), props);
-conn.createStatement().execute("CREATE SEQUENCE " + seqName + "");
-
-try {
-beforeSeqConn.createStatement().executeQuery("SELECT next value 
for " + seqName);
-fail();
-} catch (SequenceNotFoundException e) {
-beforeSeqConn.close();
-}
-
-scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-Connection afterSeqConn = DriverManager.getConnection(getUrl(), 
scnProps);
-
-rs = conn.createStatement().executeQuery("SELECT next value for " + 
seqName);
-assertTrue(rs.next());
-assertEquals(1, rs.getInt(1));
rs = conn.createStatement().executeQuery("

[2/2] phoenix git commit: PHOENIX-4544 Update statistics inconsistent behavior

2018-06-07 Thread ankit
PHOENIX-4544 Update statistics inconsistent behavior


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/406eb70d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/406eb70d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/406eb70d

Branch: refs/heads/master
Commit: 406eb70d3ec5453ad20b9a981a86ed00990aa706
Parents: dd317c7
Author: Ankit Singhal 
Authored: Thu Jun 7 11:22:08 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 7 11:22:08 2018 -0700

--
 .../StatisticsCollectionRunTrackerIT.java   | 32 --
 .../UngroupedAggregateRegionObserver.java   |  4 +-
 .../apache/phoenix/schema/MetaDataClient.java   | 10 -
 .../stats/StatisticsCollectionRunTracker.java   | 45 +---
 .../java/org/apache/phoenix/util/ByteUtil.java  | 16 ++-
 5 files changed, 85 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/406eb70d/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
index cf475f9..a643383 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
@@ -25,6 +25,8 @@ import static org.junit.Assert.assertTrue;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -61,12 +63,15 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 StatisticsCollectionRunTracker tracker =
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the region wasn't added to the tracker
-assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 // assert that removing the region from the tracker works
-assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 runUpdateStats(tableName);
 // assert that after update stats is complete, tracker isn't tracking 
the region any more
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 }
 
 @Test
@@ -103,25 +108,27 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 HRegionInfo regionInfo = createTableAndGetRegion(tableName);
 // simulate stats collection via major compaction by marking the 
region as compacting in the tracker
 markRegionAsCompacting(regionInfo);
-Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT, runUpdateStats(tableName));
-StatisticsCollectionRunTracker tracker =
+// there will be no update for local index and a table, so checking 2 
* COMPACTION_UPDATE_STATS_ROW_COUNT
+Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT * 2, runUpdateStats(tableName));
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the tracker state was cleared.
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+HashSet<byte[]> familyMap = new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, 
familyMap));
 }
 
 @Test
 public void testUpdateStatsPreventsAnotherUpdateStatsFromRunning() throws 
Exception {
 String tableName = fullTableName;
 HRegionInfo regionInfo = createTableAndGetRegion(tableName);
-markRunningUpdateStats(regionInfo);
-   

[1/2] phoenix git commit: PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured

2018-06-07 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/master 69bb8b073 -> 406eb70d3


PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/dd317c7f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/dd317c7f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/dd317c7f

Branch: refs/heads/master
Commit: dd317c7f242aa59e796e36d58290cf70c5b3e0d1
Parents: 69bb8b0
Author: Ankit Singhal 
Authored: Thu Jun 7 11:09:08 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 7 11:09:08 2018 -0700

--
 .../org/apache/phoenix/end2end/SequenceIT.java  |  57 +-
 .../phoenix/end2end/SequencePointInTimeIT.java  | 112 +++
 .../query/ConnectionQueryServicesImpl.java  |  13 ++-
 .../query/ConnectionlessQueryServicesImpl.java  |   7 +-
 .../apache/phoenix/query/QueryConstants.java|   2 +
 .../org/apache/phoenix/schema/Sequence.java |   6 +-
 6 files changed, 139 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/dd317c7f/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
index 4cc9628..b76cc4e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
@@ -18,6 +18,8 @@
 
 package org.apache.phoenix.end2end;
 
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
 import static 
org.apache.phoenix.query.QueryServicesTestImpl.DEFAULT_SEQUENCE_CACHE_SIZE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
@@ -38,6 +40,7 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesTestImpl;
 import org.apache.phoenix.schema.SchemaNotFoundException;
 import org.apache.phoenix.schema.SequenceAlreadyExistsException;
 import org.apache.phoenix.schema.SequenceNotFoundException;
@@ -202,6 +205,8 @@ public class SequenceIT extends ParallelStatsDisabledIT {
 String schemaName = getSchemaName(sequenceName);
 
 conn.createStatement().execute("CREATE SEQUENCE " + sequenceName + " 
START WITH 2 INCREMENT BY 4");
+int bucketNum = PhoenixRuntime.getTableNoCache(conn, 
SYSTEM_CATALOG_SCHEMA + "." + TYPE_SEQUENCE).getBucketNum();
+assertEquals("Salt bucket for SYSTEM.SEQUENCE should be test 
default",bucketNum , QueryServicesTestImpl.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
 String query = "SELECT sequence_schema, sequence_name, current_value, 
increment_by FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='" + 
sequenceNameWithoutSchema + "'";
 ResultSet rs = conn.prepareStatement(query).executeQuery();
 assertTrue(rs.next());
@@ -1406,56 +1411,4 @@ public class SequenceIT extends ParallelStatsDisabledIT {
return tableName.substring(tableName.indexOf(".") + 1, 
tableName.length());
 }
 
-@Test
-public void testPointInTimeSequence() throws Exception {
-String seqName = generateSequenceNameWithSchema(); 
-Properties scnProps = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-Connection beforeSeqConn = DriverManager.getConnection(getUrl(), 
scnProps);
-
-ResultSet rs;
-Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-Connection conn = DriverManager.getConnection(getUrl(), props);
-conn.createStatement().execute("CREATE SEQUENCE " + seqName + "");
-
-try {
-beforeSeqConn.createStatement().executeQuery("SELECT next value 
for " + seqName);
-fail();
-} catch (SequenceNotFoundException e) {
-beforeSeqConn.close();
-}
-
-scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-Connection afterSeqConn = DriverManager.getConnection(getUrl(), 
scnProps);
-
-rs = conn.createStatement().executeQuery("SELECT next value for " + 
seqName);
-assertTrue(rs.next());
-ass
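
For reference, the sequence DDL and NEXT VALUE FOR syntax exercised
throughout this test can be driven from plain JDBC. A compact sketch, with
seqName hypothetical:

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class SequenceUsageSketch {
        // seqName is hypothetical, e.g. "MY_SCHEMA.MY_SEQ".
        static long nextValue(Connection conn, String seqName) throws Exception {
            try (Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE SEQUENCE " + seqName
                        + " START WITH 2 INCREMENT BY 4");
                try (ResultSet rs =
                        stmt.executeQuery("SELECT NEXT VALUE FOR " + seqName)) {
                    rs.next();
                    return rs.getLong(1); // 2 on the first call, then 6, 10, ...
                }
            }
        }
    }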

phoenix git commit: PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured

2018-06-07 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 1b18d3474 -> abcf0d1ab


PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/abcf0d1a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/abcf0d1a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/abcf0d1a

Branch: refs/heads/5.x-HBase-2.0
Commit: abcf0d1ab3a85f8aa9bc5f5f5d54f6b229cfa247
Parents: 1b18d34
Author: Ankit Singhal 
Authored: Thu Jun 7 11:02:55 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 7 11:02:55 2018 -0700

--
 .../org/apache/phoenix/end2end/SequenceIT.java  |  57 +-
 .../phoenix/end2end/SequencePointInTimeIT.java  | 112 +++
 .../query/ConnectionQueryServicesImpl.java  |  13 ++-
 .../query/ConnectionlessQueryServicesImpl.java  |   8 +-
 .../apache/phoenix/query/QueryConstants.java|   2 +
 .../org/apache/phoenix/schema/Sequence.java |   6 +-
 6 files changed, 139 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/abcf0d1a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
index 4cc9628..b76cc4e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
@@ -18,6 +18,8 @@
 
 package org.apache.phoenix.end2end;
 
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
 import static 
org.apache.phoenix.query.QueryServicesTestImpl.DEFAULT_SEQUENCE_CACHE_SIZE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
@@ -38,6 +40,7 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesTestImpl;
 import org.apache.phoenix.schema.SchemaNotFoundException;
 import org.apache.phoenix.schema.SequenceAlreadyExistsException;
 import org.apache.phoenix.schema.SequenceNotFoundException;
@@ -202,6 +205,8 @@ public class SequenceIT extends ParallelStatsDisabledIT {
 String schemaName = getSchemaName(sequenceName);
 
 conn.createStatement().execute("CREATE SEQUENCE " + sequenceName + " 
START WITH 2 INCREMENT BY 4");
+int bucketNum = PhoenixRuntime.getTableNoCache(conn, 
SYSTEM_CATALOG_SCHEMA + "." + TYPE_SEQUENCE).getBucketNum();
+assertEquals("Salt bucket for SYSTEM.SEQUENCE should be test 
default",bucketNum , QueryServicesTestImpl.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
 String query = "SELECT sequence_schema, sequence_name, current_value, 
increment_by FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='" + 
sequenceNameWithoutSchema + "'";
 ResultSet rs = conn.prepareStatement(query).executeQuery();
 assertTrue(rs.next());
@@ -1406,56 +1411,4 @@ public class SequenceIT extends ParallelStatsDisabledIT {
return tableName.substring(tableName.indexOf(".") + 1, 
tableName.length());
 }
 
-@Test
-public void testPointInTimeSequence() throws Exception {
-String seqName = generateSequenceNameWithSchema(); 
-Properties scnProps = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-Connection beforeSeqConn = DriverManager.getConnection(getUrl(), 
scnProps);
-
-ResultSet rs;
-Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-Connection conn = DriverManager.getConnection(getUrl(), props);
-conn.createStatement().execute("CREATE SEQUENCE " + seqName + "");
-
-try {
-beforeSeqConn.createStatement().executeQuery("SELECT next value 
for " + seqName);
-fail();
-} catch (SequenceNotFoundException e) {
-beforeSeqConn.close();
-}
-
-scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-Connection afterSeqConn = DriverManager.getConnection(getUrl(), 
scnProps);
-
-rs = conn.createStatement().executeQuery("SELECT next value for " + 
seqName);
-asser

phoenix git commit: PHOENIX-4544 Update statistics inconsistent behavior

2018-06-07 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 5aebc96b6 -> 1b18d3474


PHOENIX-4544 Update statistics inconsistent behavior


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1b18d347
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1b18d347
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1b18d347

Branch: refs/heads/5.x-HBase-2.0
Commit: 1b18d3474d2e3deca429374dac60062b48fe1592
Parents: 5aebc96
Author: Ankit Singhal 
Authored: Thu Jun 7 11:01:14 2018 -0700
Committer: Ankit Singhal 
Committed: Thu Jun 7 11:01:14 2018 -0700

--
 .../StatisticsCollectionRunTrackerIT.java   | 32 +-
 .../UngroupedAggregateRegionObserver.java   |  7 +--
 .../apache/phoenix/schema/MetaDataClient.java   |  8 ++--
 .../stats/StatisticsCollectionRunTracker.java   | 46 +---
 .../java/org/apache/phoenix/util/ByteUtil.java  | 16 ++-
 5 files changed, 83 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1b18d347/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
index 71c9e01..cdf1fde 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
@@ -25,12 +25,15 @@ import static org.junit.Assert.assertTrue;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.ParallelStatsEnabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker;
@@ -60,12 +63,15 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 StatisticsCollectionRunTracker tracker =
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the region wasn't added to the tracker
-assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 // assert that removing the region from the tracker works
-assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 runUpdateStats(tableName);
 // assert that after update stats is complete, tracker isn't tracking 
the region any more
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new 
HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 }
 
 @Test
@@ -102,25 +108,28 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 RegionInfo regionInfo = createTableAndGetRegion(tableName);
 // simulate stats collection via major compaction by marking the 
region as compacting in the tracker
 markRegionAsCompacting(regionInfo);
-Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT, runUpdateStats(tableName));
+// there will be no update for local index and a table, so checking 2 
* COMPACTION_UPDATE_STATS_ROW_COUNT
+Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT * 2, runUpdateStats(tableName));
 StatisticsCollectionRunTracker tracker =
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the tracker state was cleared.
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo
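
runUpdateStats in these tests amounts to issuing Phoenix's UPDATE STATISTICS
statement, whose returned row count the assertions above now double to cover
the table and its local index in one pass. A minimal JDBC sketch, with
tableName hypothetical:

    import java.sql.Connection;
    import java.sql.Statement;

    public class UpdateStatsSketch {
        static void updateStats(Connection conn, String tableName) throws Exception {
            try (Statement stmt = conn.createStatement()) {
                // Recollects guideposts (statistics) for the named table.
                stmt.execute("UPDATE STATISTICS " + tableName);
            }
        }
    }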

[2/2] phoenix git commit: PHOENIX-4768 Re-enable testCompactUpdatesStats and testCompactUpdatesStatsWithMinStatsUpdateFreq of StatsCollectorIT

2018-06-01 Thread ankit
PHOENIX-4768 Re-enable testCompactUpdatesStats and 
testCompactUpdatesStatsWithMinStatsUpdateFreq of StatsCollectorIT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1ae275c1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1ae275c1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1ae275c1

Branch: refs/heads/4.x-HBase-0.98
Commit: 1ae275c15a65ce70c3ac7f00e6751b80dc270ca2
Parents: 9d29e90
Author: Ankit Singhal 
Authored: Fri Jun 1 14:45:45 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Jun 1 14:45:45 2018 -0700

--
 .../org/apache/phoenix/schema/stats/StatsCollectorIT.java | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1ae275c1/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
index 09d28f8..5436311 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
@@ -68,7 +68,6 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -404,13 +403,11 @@ public abstract class StatsCollectorIT extends 
BaseUniqueNamesOwnClusterIT {
 }
 
 @Test
-@Ignore //TODO remove this once  
https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
 public void testCompactUpdatesStats() throws Exception {
 testCompactUpdatesStats(0, fullTableName);
 }
 
 @Test
-@Ignore //TODO remove this once  
https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
 public void testCompactUpdatesStatsWithMinStatsUpdateFreq() throws 
Exception {
 
testCompactUpdatesStats(QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS, 
fullTableName);
 }
@@ -463,7 +460,7 @@ public abstract class StatsCollectorIT extends 
BaseUniqueNamesOwnClusterIT {
 Scan scan = new Scan();
 scan.setRaw(true);
 PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
-try (HTableInterface htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
+try (HTableInterface htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(physicalTableName))) {
 ResultScanner scanner = htable.getScanner(scan);
 Result result;
 while ((result = scanner.next())!=null) {
@@ -476,7 +473,7 @@ public abstract class StatsCollectorIT extends 
BaseUniqueNamesOwnClusterIT {
 scan = new Scan();
 scan.setRaw(true);
 phxConn = conn.unwrap(PhoenixConnection.class);
-try (HTableInterface htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
+try (HTableInterface htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(physicalTableName))) {
 ResultScanner scanner = htable.getScanner(scan);
 Result result;
 while ((result = scanner.next())!=null) {
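
Both hunks above switch the raw scan from the Phoenix table name to the
physical HBase table name, which differ once namespace mapping is enabled.
A sketch of that scan pattern, assuming an open connection and a known
physicalTableName:

    import java.sql.Connection;

    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.jdbc.PhoenixConnection;

    public class RawScanSketch {
        static int countRawCells(Connection conn, String physicalTableName)
                throws Exception {
            Scan scan = new Scan();
            scan.setRaw(true); // include delete markers and all cell versions
            PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
            int cells = 0;
            try (HTableInterface htable = phxConn.getQueryServices()
                    .getTable(Bytes.toBytes(physicalTableName))) {
                ResultScanner scanner = htable.getScanner(scan);
                for (Result result; (result = scanner.next()) != null; ) {
                    cells += result.rawCells().length;
                }
            }
            return cells;
        }
    }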



[1/2] phoenix git commit: PHOENIX-4769 Annotate SystemCatalogIT so that it will run with the test suite

2018-06-01 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 5ce06a062 -> 1ae275c15


PHOENIX-4769 Annotate SystemCatalogIT so that it will run with the test suite


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9d29e905
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9d29e905
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9d29e905

Branch: refs/heads/4.x-HBase-0.98
Commit: 9d29e9051766d65a3cc2412ea1be09f111602a0d
Parents: 5ce06a0
Author: Ankit Singhal 
Authored: Fri Jun 1 14:45:36 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Jun 1 14:45:36 2018 -0700

--
 .../apache/phoenix/end2end/SystemCatalogIT.java   | 18 --
 1 file changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9d29e905/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
index acc7873..c21c399 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
@@ -18,7 +18,6 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -26,13 +25,17 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.junit.After;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
-public class SystemCatalogIT {
+@Category(NeedsOwnMiniClusterTest.class)
+public class SystemCatalogIT extends BaseTest {
 private HBaseTestingUtility testUtil = null;
 
 @After
@@ -55,10 +58,13 @@ public class SystemCatalogIT {
 }
 TableName systemCatalog = TableName.valueOf("SYSTEM.CATALOG");
 assertEquals(1, 
testUtil.getHBaseAdmin().getTableRegions(systemCatalog).size());
-
-// now attempt to split SYSTEM.CATALOG
-testUtil.getHBaseAdmin().split(systemCatalog.getName());
-
+try {
+// now attempt to split SYSTEM.CATALOG
+testUtil.getHBaseAdmin().split(systemCatalog.getName());
+} catch (DoNotRetryIOException e) {
+// table is not splittable
+assert (e.getMessage().contains("NOT splittable"));
+}
 // make sure the split finishes (there's no synchronous splitting 
before HBase 2.x)
 testUtil.getHBaseAdmin().disableTable(systemCatalog);
 testUtil.getHBaseAdmin().enableTable(systemCatalog);
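
A condensed sketch of the guarded split this diff introduces, using the
0.98-era Admin API shown above; admin is assumed to come from
HBaseTestingUtility#getHBaseAdmin():

    import org.apache.hadoop.hbase.DoNotRetryIOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class CatalogSplitSketch {
        static void trySplit(HBaseAdmin admin) throws Exception {
            TableName systemCatalog = TableName.valueOf("SYSTEM.CATALOG");
            try {
                admin.split(systemCatalog.getName());
            } catch (DoNotRetryIOException e) {
                // Expected: the table's split policy marks SYSTEM.CATALOG as
                // NOT splittable, so the region server rejects the request.
            }
            // Either way, SYSTEM.CATALOG must still be exactly one region.
            assert admin.getTableRegions(systemCatalog).size() == 1;
        }
    }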



[2/2] phoenix git commit: PHOENIX-4769 Annotate SystemCatalogIT so that it will run with the test suite

2018-06-01 Thread ankit
PHOENIX-4769 Annotate SystemCatalogIT so that it will run with the test suite


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/27a66607
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/27a66607
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/27a66607

Branch: refs/heads/4.x-HBase-1.1
Commit: 27a66607f3f67273367c7c5d2b1d20b721ec227b
Parents: 8657ce2
Author: Ankit Singhal 
Authored: Fri Jun 1 14:43:30 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Jun 1 14:43:30 2018 -0700

--
 .../apache/phoenix/end2end/SystemCatalogIT.java | 23 +---
 1 file changed, 15 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/27a66607/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
index 15af2af..2626ad6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
@@ -19,7 +19,6 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -27,14 +26,18 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.junit.After;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
-public class SystemCatalogIT {
+@Category(NeedsOwnMiniClusterTest.class)
+public class SystemCatalogIT extends BaseTest {
 private HBaseTestingUtility testUtil = null;
 
 @After
@@ -58,13 +61,17 @@ public class SystemCatalogIT {
 TableName systemCatalog = TableName.valueOf("SYSTEM.CATALOG");
 RegionLocator rl = 
testUtil.getConnection().getRegionLocator(systemCatalog);
 assertEquals(rl.getAllRegionLocations().size(), 1);
+try {
+// now attempt to split SYSTEM.CATALOG
+testUtil.getHBaseAdmin().split(systemCatalog);
 
-// now attempt to split SYSTEM.CATALOG
-testUtil.getHBaseAdmin().split(systemCatalog);
-
-// make sure the split finishes (there's no synchronous splitting 
before HBase 2.x)
-testUtil.getHBaseAdmin().disableTable(systemCatalog);
-testUtil.getHBaseAdmin().enableTable(systemCatalog);
+// make sure the split finishes (there's no synchronous splitting 
before HBase 2.x)
+testUtil.getHBaseAdmin().disableTable(systemCatalog);
+testUtil.getHBaseAdmin().enableTable(systemCatalog);
+} catch (DoNotRetryIOException e) {
+// table is not splittable
+assert (e.getMessage().contains("NOT splittable"));
+}
 
 // test again... Must still be exactly one region.
 rl = testUtil.getConnection().getRegionLocator(systemCatalog);



[1/2] phoenix git commit: PHOENIX-4768 Re-enable testCompactUpdatesStats and testCompactUpdatesStatsWithMinStatsUpdateFreq of StatsCollectorIT

2018-06-01 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 f8d09637c -> 27a66607f


PHOENIX-4768 Re-enable testCompactUpdatesStats and 
testCompactUpdatesStatsWithMinStatsUpdateFreq of StatsCollectorIT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8657ce2c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8657ce2c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8657ce2c

Branch: refs/heads/4.x-HBase-1.1
Commit: 8657ce2cd2eca81d22ccaa78f4b2183a40f4935b
Parents: f8d0963
Author: Ankit Singhal 
Authored: Fri Jun 1 14:43:22 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Jun 1 14:43:22 2018 -0700

--
 .../org/apache/phoenix/schema/stats/StatsCollectorIT.java | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8657ce2c/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
index 09d28f8..5436311 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
@@ -68,7 +68,6 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -404,13 +403,11 @@ public abstract class StatsCollectorIT extends 
BaseUniqueNamesOwnClusterIT {
 }
 
 @Test
-@Ignore //TODO remove this once  
https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
 public void testCompactUpdatesStats() throws Exception {
 testCompactUpdatesStats(0, fullTableName);
 }
 
 @Test
-@Ignore //TODO remove this once  
https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
 public void testCompactUpdatesStatsWithMinStatsUpdateFreq() throws 
Exception {
 
testCompactUpdatesStats(QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS, 
fullTableName);
 }
@@ -463,7 +460,7 @@ public abstract class StatsCollectorIT extends 
BaseUniqueNamesOwnClusterIT {
 Scan scan = new Scan();
 scan.setRaw(true);
 PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
-try (HTableInterface htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
+try (HTableInterface htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(physicalTableName))) {
 ResultScanner scanner = htable.getScanner(scan);
 Result result;
 while ((result = scanner.next())!=null) {
@@ -476,7 +473,7 @@ public abstract class StatsCollectorIT extends 
BaseUniqueNamesOwnClusterIT {
 scan = new Scan();
 scan.setRaw(true);
 phxConn = conn.unwrap(PhoenixConnection.class);
-try (HTableInterface htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
+try (HTableInterface htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(physicalTableName))) {
 ResultScanner scanner = htable.getScanner(scan);
 Result result;
 while ((result = scanner.next())!=null) {



[2/2] phoenix git commit: PHOENIX-4768 Re-enable testCompactUpdatesStats and testCompactUpdatesStatsWithMinStatsUpdateFreq of StatsCollectorIT

2018-06-01 Thread ankit
PHOENIX-4768 Re-enable testCompactUpdatesStats and 
testCompactUpdatesStatsWithMinStatsUpdateFreq of StatsCollectorIT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ce834bec
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ce834bec
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ce834bec

Branch: refs/heads/4.x-HBase-1.2
Commit: ce834bec462094a203e1c99b52ce82b8da89
Parents: 091ffbd
Author: Ankit Singhal 
Authored: Fri Jun 1 14:43:01 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Jun 1 14:43:01 2018 -0700

--
 .../org/apache/phoenix/schema/stats/StatsCollectorIT.java | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ce834bec/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
index 09d28f8..5436311 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
@@ -68,7 +68,6 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -404,13 +403,11 @@ public abstract class StatsCollectorIT extends 
BaseUniqueNamesOwnClusterIT {
 }
 
 @Test
-@Ignore //TODO remove this once  
https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
 public void testCompactUpdatesStats() throws Exception {
 testCompactUpdatesStats(0, fullTableName);
 }
 
 @Test
-@Ignore //TODO remove this once  
https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
 public void testCompactUpdatesStatsWithMinStatsUpdateFreq() throws 
Exception {
 
testCompactUpdatesStats(QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS, 
fullTableName);
 }
@@ -463,7 +460,7 @@ public abstract class StatsCollectorIT extends 
BaseUniqueNamesOwnClusterIT {
 Scan scan = new Scan();
 scan.setRaw(true);
 PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
-try (HTableInterface htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
+try (HTableInterface htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(physicalTableName))) {
 ResultScanner scanner = htable.getScanner(scan);
 Result result;
 while ((result = scanner.next())!=null) {
@@ -476,7 +473,7 @@ public abstract class StatsCollectorIT extends 
BaseUniqueNamesOwnClusterIT {
 scan = new Scan();
 scan.setRaw(true);
 phxConn = conn.unwrap(PhoenixConnection.class);
-try (HTableInterface htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
+try (HTableInterface htable = 
phxConn.getQueryServices().getTable(Bytes.toBytes(physicalTableName))) {
 ResultScanner scanner = htable.getScanner(scan);
 Result result;
 while ((result = scanner.next())!=null) {



[1/2] phoenix git commit: PHOENIX-4769 Annotate SystemCatalogIT so that it will run with the test suite

2018-06-01 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 445f72750 -> ce834bec4


PHOENIX-4769 Annotate SystemCatalogIT so that it will run with the test suite


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/091ffbd8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/091ffbd8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/091ffbd8

Branch: refs/heads/4.x-HBase-1.2
Commit: 091ffbd856275a95f826a7915db57e8918448e53
Parents: 445f727
Author: Ankit Singhal 
Authored: Fri Jun 1 14:42:50 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Jun 1 14:42:50 2018 -0700

--
 .../apache/phoenix/end2end/SystemCatalogIT.java | 23 +---
 1 file changed, 15 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/091ffbd8/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
index 7b6a543..6f49518 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
@@ -18,7 +18,6 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -26,14 +25,18 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.junit.After;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
-public class SystemCatalogIT {
+@Category(NeedsOwnMiniClusterTest.class)
+public class SystemCatalogIT extends BaseTest {
 private HBaseTestingUtility testUtil = null;
 
 @After
@@ -57,13 +60,17 @@ public class SystemCatalogIT {
 TableName systemCatalog = TableName.valueOf("SYSTEM.CATALOG");
 RegionLocator rl = 
testUtil.getConnection().getRegionLocator(systemCatalog);
 assertEquals(rl.getAllRegionLocations().size(), 1);
+try {
+// now attempt to split SYSTEM.CATALOG
+testUtil.getHBaseAdmin().split(systemCatalog);
 
-// now attempt to split SYSTEM.CATALOG
-testUtil.getHBaseAdmin().split(systemCatalog);
-
-// make sure the split finishes (there's no synchronous splitting 
before HBase 2.x)
-testUtil.getHBaseAdmin().disableTable(systemCatalog);
-testUtil.getHBaseAdmin().enableTable(systemCatalog);
+// make sure the split finishes (there's no synchronous splitting 
before HBase 2.x)
+testUtil.getHBaseAdmin().disableTable(systemCatalog);
+testUtil.getHBaseAdmin().enableTable(systemCatalog);
+} catch (DoNotRetryIOException e) {
+// table is not splittable
+assert (e.getMessage().contains("NOT splittable"));
+}
 
 // test again... Must still be exactly one region.
 rl = testUtil.getConnection().getRegionLocator(systemCatalog);



[1/2] phoenix git commit: PHOENIX-4768 Re-enable testCompactUpdatesStats and testCompactUpdatesStatsWithMinStatsUpdateFreq of StatsCollectorIT

2018-06-01 Thread ankit
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 f3119320b -> 190ff8401


PHOENIX-4768 Re-enable testCompactUpdatesStats and 
testCompactUpdatesStatsWithMinStatsUpdateFreq of StatsCollectorIT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2f67e9f9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2f67e9f9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2f67e9f9

Branch: refs/heads/4.x-HBase-1.3
Commit: 2f67e9f9d5f6008c08df3ff0328c10599e8878e2
Parents: f311932
Author: Ankit Singhal 
Authored: Fri Jun 1 14:42:00 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Jun 1 14:42:00 2018 -0700

--
 .../org/apache/phoenix/schema/stats/StatsCollectorIT.java | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f67e9f9/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
index 09d28f8..5436311 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
@@ -68,7 +68,6 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -404,13 +403,11 @@ public abstract class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
 }
 
 @Test
-@Ignore //TODO remove this once  https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
 public void testCompactUpdatesStats() throws Exception {
 testCompactUpdatesStats(0, fullTableName);
 }
 
 @Test
-@Ignore //TODO remove this once  https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
 public void testCompactUpdatesStatsWithMinStatsUpdateFreq() throws Exception {
 testCompactUpdatesStats(QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS, fullTableName);
 }
@@ -463,7 +460,7 @@ public abstract class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
 Scan scan = new Scan();
 scan.setRaw(true);
 PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
-try (HTableInterface htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
+try (HTableInterface htable = phxConn.getQueryServices().getTable(Bytes.toBytes(physicalTableName))) {
 ResultScanner scanner = htable.getScanner(scan);
 Result result;
 while ((result = scanner.next())!=null) {
@@ -476,7 +473,7 @@ public abstract class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
 scan = new Scan();
 scan.setRaw(true);
 phxConn = conn.unwrap(PhoenixConnection.class);
-try (HTableInterface htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
+try (HTableInterface htable = phxConn.getQueryServices().getTable(Bytes.toBytes(physicalTableName))) {
 ResultScanner scanner = htable.getScanner(scan);
 Result result;
 while ((result = scanner.next())!=null) {



[2/2] phoenix git commit: PHOENIX-4769 Annotate SystemCatalogIT so that it will run with the test suite

2018-06-01 Thread ankit
PHOENIX-4769 Annotate SystemCatalogIT so that it will run with the test suite


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/190ff840
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/190ff840
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/190ff840

Branch: refs/heads/4.x-HBase-1.3
Commit: 190ff8401a4b155ccf5a849aced0e1cbade92445
Parents: 2f67e9f
Author: Ankit Singhal 
Authored: Fri Jun 1 14:42:27 2018 -0700
Committer: Ankit Singhal 
Committed: Fri Jun 1 14:42:27 2018 -0700

--
 .../apache/phoenix/end2end/SystemCatalogIT.java | 23 +---
 1 file changed, 15 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/190ff840/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
index 7b6a543..6f49518 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogIT.java
@@ -18,7 +18,6 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -26,14 +25,18 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.junit.After;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
-public class SystemCatalogIT {
+@Category(NeedsOwnMiniClusterTest.class)
+public class SystemCatalogIT extends BaseTest {
 private HBaseTestingUtility testUtil = null;
 
 @After
@@ -57,13 +60,17 @@ public class SystemCatalogIT {
 TableName systemCatalog = TableName.valueOf("SYSTEM.CATALOG");
    RegionLocator rl = testUtil.getConnection().getRegionLocator(systemCatalog);
 assertEquals(rl.getAllRegionLocations().size(), 1);
+try {
+// now attempt to split SYSTEM.CATALOG
+testUtil.getHBaseAdmin().split(systemCatalog);
 
-// now attempt to split SYSTEM.CATALOG
-testUtil.getHBaseAdmin().split(systemCatalog);
-
-// make sure the split finishes (there's no synchronous splitting before HBase 2.x)
-testUtil.getHBaseAdmin().disableTable(systemCatalog);
-testUtil.getHBaseAdmin().enableTable(systemCatalog);
+// make sure the split finishes (there's no synchronous splitting before HBase 2.x)
+testUtil.getHBaseAdmin().disableTable(systemCatalog);
+testUtil.getHBaseAdmin().enableTable(systemCatalog);
+} catch (DoNotRetryIOException e) {
+// table is not splittable
+assert (e.getMessage().contains("NOT splittable"));
+}
 
 // test again... Must still be exactly one region.
 rl = testUtil.getConnection().getRegionLocator(systemCatalog);
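
The @Category annotation added above is what lets the suite pick the test up: PHOENIX-4769 tags the class with NeedsOwnMiniClusterTest so the category-based runner schedules it alongside the other tests that manage their own mini cluster. As a rough illustration of the pattern (the class and method names below are invented, not from the commit):

import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.query.BaseTest;
import org.junit.Test;
import org.junit.experimental.categories.Category;

// Illustrative only: a test tagged this way is expected to start and stop
// its own HBaseTestingUtility rather than share the suite's cluster.
@Category(NeedsOwnMiniClusterTest.class)
public class ExampleOwnClusterIT extends BaseTest {
    @Test
    public void testAgainstOwnCluster() throws Exception {
        // set up a dedicated mini cluster here, run assertions, tear it down
    }
}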


