[phoenix] branch 5.1 updated: PHOENIX-6604 Allow using indexes for wildcard topN queries on salted tables

2021-12-16 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new fb3467c  PHOENIX-6604 Allow using indexes for wildcard topN queries on 
salted tables
fb3467c is described below

commit fb3467cefdbbd5f76d2135c34a8ee376c3dd085f
Author: Lars 
AuthorDate: Sat Dec 4 09:22:06 2021 -0800

PHOENIX-6604 Allow using indexes for wildcard topN queries on salted tables
---
 .../org/apache/phoenix/end2end/index/SaltedIndexIT.java  | 16 
 .../apache/phoenix/compile/TupleProjectionCompiler.java  |  5 +
 2 files changed, 21 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java
index 5ebee5c..b1ed74e 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java
@@ -219,5 +219,21 @@ public class SaltedIndexIT extends ParallelStatsDisabledIT 
{
  "CLIENT 2 ROW LIMIT";
 String explainPlan = QueryUtil.getExplainPlan(rs);
 assertEquals(expectedPlan,explainPlan);
+
+// PHOENIX-6604
+query = "SELECT * FROM " + dataTableFullName + " ORDER BY v DESC LIMIT 
1";
+rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+expectedPlan = indexSaltBuckets == null ?
+"CLIENT SERIAL 1-WAY FULL SCAN OVER " + indexTableFullName + "\n"
+  + "SERVER FILTER BY FIRST KEY ONLY\n"
+  + "SERVER 1 ROW LIMIT\n"
+  + "CLIENT 1 ROW LIMIT"
+:
+"CLIENT PARALLEL 4-WAY FULL SCAN OVER " + indexTableFullName + "\n"
+  + "SERVER FILTER BY FIRST KEY ONLY\n"
+  + "SERVER 1 ROW LIMIT\n"
+  + "CLIENT MERGE SORT\n"
+  + "CLIENT 1 ROW LIMIT";
+assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
 }
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
index 9a0b5b7..05dabaf 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
@@ -51,6 +51,7 @@ import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.ProjectedColumn;
+import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.IndexUtil;
@@ -90,6 +91,10 @@ public class TupleProjectionCompiler {
 table.getSchemaName().getString(),
 table.getParentTableName().getString());
 for (PColumn column : 
parentTableRef.getTable().getColumns()) {
+// don't attempt to rewrite the parent's SALTING COLUMN
+if (column == SaltingUtil.SALTING_COLUMN) {
+continue;
+}
 NODE_FACTORY.column(null, '"' + 
IndexUtil.getIndexColumnName(column) + '"', null).accept(visitor);
 }
 }


[phoenix] branch master updated (2b2c3f5 -> eac331f)

2021-12-16 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 2b2c3f5  PHOENIX-6603: Add SYSTEM.TRANSFORM table (#656)
 add eac331f  PHOENIX-6604 Allow using indexes for wildcard topN queries on 
salted tables

No new revisions were added by this update.

Summary of changes:
 .../org/apache/phoenix/end2end/index/SaltedIndexIT.java  | 16 
 .../apache/phoenix/compile/TupleProjectionCompiler.java  |  5 +
 2 files changed, 21 insertions(+)


[phoenix] branch 5.1 updated: PHOENIX-6437: Parent-Child Delete marker should get replicated via SystemCatalogWalEntryFilter (#1222)

2021-05-26 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new c582e88  PHOENIX-6437: Parent-Child Delete marker should get 
replicated via SystemCatalogWalEntryFilter (#1222)
c582e88 is described below

commit c582e88d268156fe2a286b01459192353910c003
Author: ankitjain64 <34427442+ankitjai...@users.noreply.github.com>
AuthorDate: Wed May 5 16:44:05 2021 -0700

PHOENIX-6437: Parent-Child Delete marker should get replicated via 
SystemCatalogWalEntryFilter (#1222)

Co-authored-by: Ankit Jain 
---
 .../replication/SystemCatalogWALEntryFilterIT.java | 184 ++---
 .../replication/SystemCatalogWALEntryFilter.java   |  25 +--
 2 files changed, 170 insertions(+), 39 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
index 6cd8b78..a590d87 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.replication.ChainWALEntryFilter;
@@ -37,6 +39,7 @@ import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.hbase.index.wal.IndexedKeyValue;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.schema.PTable;
@@ -71,8 +74,8 @@ public class SystemCatalogWALEntryFilterIT extends 
ParallelStatsDisabledIT {
   + NONTENANT_VIEW_NAME + "(" + VIEW_COLUMN_NAME + " varchar)  AS SELECT * 
FROM "
   + TestUtil.ENTITY_HISTORY_TABLE_NAME  + " WHERE OLD_VALUE like 'E%'";
 
-  private static final String DROP_TENANT_VIEW_SQL = "DROP VIEW IF EXISTS " + 
TENANT_VIEW_NAME;
-  private static final String DROP_NONTENANT_VIEW_SQL = "DROP VIEW IF EXISTS " 
+ NONTENANT_VIEW_NAME;
+  private static final String DROP_TENANT_VIEW_SQL = "DROP VIEW IF EXISTS " + 
SCHEMA_NAME + "." + TENANT_VIEW_NAME;
+  private static final String DROP_NONTENANT_VIEW_SQL = "DROP VIEW IF EXISTS " 
+ SCHEMA_NAME + "." + NONTENANT_VIEW_NAME;
   private static PTable catalogTable;
   private static PTable childLinkTable;
   private static WALKeyImpl walKeyCatalog = null;
@@ -100,24 +103,13 @@ public class SystemCatalogWALEntryFilterIT extends 
ParallelStatsDisabledIT {
 PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME), 0, 0, uuid);
 };
 Assert.assertNotNull(catalogTable);
-try (java.sql.Connection connection =
- 
ConnectionUtil.getInputConnection(getUtility().getConfiguration(), new 
Properties())) {
-  connection.createStatement().execute(CREATE_NONTENANT_VIEW_SQL);
-};
+createNonTenantView();
   }
 
   @AfterClass
   public static synchronized void tearDown() throws Exception {
-Properties tenantProperties = new Properties();
-tenantProperties.setProperty("TenantId", TENANT_ID);
-try (java.sql.Connection connection =
- 
ConnectionUtil.getInputConnection(getUtility().getConfiguration(), 
tenantProperties)) {
-  connection.createStatement().execute(DROP_TENANT_VIEW_SQL);
-}
-try (java.sql.Connection connection =
- 
ConnectionUtil.getInputConnection(getUtility().getConfiguration(), new 
Properties())) {
-  connection.createStatement().execute(DROP_NONTENANT_VIEW_SQL);
-}
+dropTenantView();
+dropNonTenantView();
   }
 
   @Test
@@ -139,7 +131,7 @@ public class SystemCatalogWALEntryFilterIT extends 
ParallelStatsDisabledIT {
 
 WAL.Entry nonTenantEntryCatalog = getEntry(systemCatalogTableName, 
nonTenantGetCatalog);
 WAL.Entry tenantEntryCatalog = getEntry(systemCatalogTableName, 
tenantGetCatalog);
-int tenantRowCount = getAndAssertTenantCountInEdit(tenantEntryCatalog);
+int tenantRowCount = getAndAssertCountInEdit(tenantEntryCatalog, true);
 Assert.assertTrue(tenantRowCount > 0);
 
 //verify that the tenant view WAL.Entry passes the filter and the 
non-tenant view does not
@@ -156,7 +148,7 @@ public class SystemCatalo

[phoenix] branch 5.1 updated: PHOENIX-6435 Fix ViewTTLIT test flapper

2021-05-26 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new ca3c5b1  PHOENIX-6435 Fix ViewTTLIT test flapper
ca3c5b1 is described below

commit ca3c5b190d67d490dd6f28f6d97dcb52715e1373
Author: Xinyi Yan 
AuthorDate: Fri Apr 23 16:54:11 2021 -0700

PHOENIX-6435 Fix ViewTTLIT test flapper
---
 phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java
index dff52e8..e77bc37 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java
@@ -66,6 +66,7 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Assert;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -93,6 +94,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+@Category(NeedsOwnMiniClusterTest.class)
 public class ViewTTLIT extends ParallelStatsDisabledIT {
 private static final Logger LOGGER = 
LoggerFactory.getLogger(ViewTTLIT.class);
 private static final String ORG_ID_FMT = "00D0x000%s";


[phoenix] branch 5.1 updated: PHOENIX-6447: Add support for SYSTEM.CHILD_LINK table in systemcatalogwalentryfilter (#1207)

2021-05-26 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new a30034c  PHOENIX-6447: Add support for SYSTEM.CHILD_LINK table in 
systemcatalogwalentryfilter (#1207)
a30034c is described below

commit a30034ccf239712a5e209833eeb0a3dfb61c7ba8
Author: Sandeep Pal <50725353+sandeepvina...@users.noreply.github.com>
AuthorDate: Mon Apr 26 13:24:29 2021 -0700

PHOENIX-6447: Add support for SYSTEM.CHILD_LINK table in 
systemcatalogwalentryfilter (#1207)

* PHOENIX-6447: Add support for SYSTEM.CHILD_LINK table in 
systemcatalogwalentryfilter
---
 .../replication/SystemCatalogWALEntryFilterIT.java | 138 +
 .../replication/SystemCatalogWALEntryFilter.java   |  87 ++---
 2 files changed, 184 insertions(+), 41 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
index 4a704a6..6cd8b78 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
@@ -40,6 +40,7 @@ import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.TestUtil;
@@ -73,10 +74,13 @@ public class SystemCatalogWALEntryFilterIT extends 
ParallelStatsDisabledIT {
   private static final String DROP_TENANT_VIEW_SQL = "DROP VIEW IF EXISTS " + 
TENANT_VIEW_NAME;
   private static final String DROP_NONTENANT_VIEW_SQL = "DROP VIEW IF EXISTS " 
+ NONTENANT_VIEW_NAME;
   private static PTable catalogTable;
-  private static WALKeyImpl walKey = null;
+  private static PTable childLinkTable;
+  private static WALKeyImpl walKeyCatalog = null;
+  private static WALKeyImpl walKeyChildLink = null;
   private static TableName systemCatalogTableName =
   TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
-
+  private static TableName systemChildLinkTableName =
+TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME);
 
   @BeforeClass
   public static synchronized void setup() throws Exception {
@@ -89,7 +93,11 @@ public class SystemCatalogWALEntryFilterIT extends 
ParallelStatsDisabledIT {
   ensureTableCreated(getUrl(), TestUtil.ENTITY_HISTORY_TABLE_NAME);
   connection.createStatement().execute(CREATE_TENANT_VIEW_SQL);
   catalogTable = PhoenixRuntime.getTable(connection, 
PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
-  walKey = new WALKeyImpl(REGION, 
TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME), 0, 0, uuid);
+  childLinkTable = PhoenixRuntime.getTable(connection, 
PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME);
+  walKeyCatalog = new WALKeyImpl(REGION, TableName.valueOf(
+PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME), 0, 0, uuid);
+  walKeyChildLink = new WALKeyImpl(REGION, TableName.valueOf(
+PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME), 0, 0, uuid);
 };
 Assert.assertNotNull(catalogTable);
 try (java.sql.Connection connection =
@@ -124,42 +132,88 @@ public class SystemCatalogWALEntryFilterIT extends 
ParallelStatsDisabledIT {
 
   @Test
   public void testSystemCatalogWALEntryFilter() throws Exception {
+// now create WAL.Entry objects that refer to cells in those view rows in
+// System.Catalog
+Get tenantGetCatalog = getGet(catalogTable, TENANT_BYTES, 
TENANT_VIEW_NAME);
+Get nonTenantGetCatalog = getGet(catalogTable, DEFAULT_TENANT_BYTES, 
NONTENANT_VIEW_NAME);
 
-//now create WAL.Entry objects that refer to cells in those view rows in 
System.Catalog
-
-Get tenantGet = getGet(catalogTable, TENANT_BYTES, TENANT_VIEW_NAME);
-Get nonTenantGet = getGet(catalogTable, DEFAULT_TENANT_BYTES, 
NONTENANT_VIEW_NAME);
-
-WAL.Entry nonTenantEntry = getEntry(systemCatalogTableName, nonTenantGet);
-WAL.Entry tenantEntry = getEntry(systemCatalogTableName, tenantGet);
+WAL.Entry nonTenantEntryCatalog = getEntry(systemCatalogTableName, 
nonTenantGetCatalog);
+WAL.Entry tenantEntryCatalog = getEntry(systemCatalogTableName, 
tenantGetCatalog);
+int tenantRowCount = getAndAssertTenantCountInEdit(tenantEntryCatalog);
+Assert.assertTrue(tenantRowCount > 0);
 
 //verify that the tenant view WAL.Entry passes the filter and the 
non-tenant view does not
 SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
 // Chain the sy

[phoenix] branch 5.1 updated: PHOENIX-6436 OrderedResultIterator overestimates memory requirements.

2021-04-05 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new 4d8c976  PHOENIX-6436 OrderedResultIterator overestimates memory 
requirements.
4d8c976 is described below

commit 4d8c976841bcc37dc455a0b64c3766dab4f76934
Author: Lars 
AuthorDate: Sun Apr 4 15:42:54 2021 -0700

PHOENIX-6436 OrderedResultIterator overestimates memory requirements.
---
 .../main/java/org/apache/phoenix/iterate/OrderedResultIterator.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java
index edc7cc0..ac05d02 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java
@@ -197,7 +197,8 @@ public class OrderedResultIterator implements 
PeekingResultIterator {
 // Make sure we don't overflow Long, though this is really unlikely to 
happen.
 assert(limit == null || Long.MAX_VALUE / estimatedEntrySize >= limit + 
this.offset);
 
-this.estimatedByteSize = limit == null ? 0 : (limit + this.offset) * 
estimatedEntrySize;
+// Both BufferedSortedQueue and SizeBoundQueue won't allocate more 
than thresholdBytes.
+this.estimatedByteSize = limit == null ? 0 : Math.min((limit + 
this.offset) * estimatedEntrySize, thresholdBytes);
 this.pageSizeMs = pageSizeMs;
 }
 


[phoenix] branch 4.x updated: PHOENIX-6436 OrderedResultIterator overestimates memory requirements.

2021-04-05 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 5c57004  PHOENIX-6436 OrderedResultIterator overestimates memory 
requirements.
5c57004 is described below

commit 5c57004e2717736370aaec208112e197940e5cc2
Author: Lars 
AuthorDate: Sun Apr 4 15:42:54 2021 -0700

PHOENIX-6436 OrderedResultIterator overestimates memory requirements.
---
 .../main/java/org/apache/phoenix/iterate/OrderedResultIterator.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java
index bb0607c..4179730 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java
@@ -196,7 +196,8 @@ public class OrderedResultIterator implements 
PeekingResultIterator {
 // Make sure we don't overflow Long, though this is really unlikely to 
happen.
 assert(limit == null || Long.MAX_VALUE / estimatedEntrySize >= limit + 
this.offset);
 
-this.estimatedByteSize = limit == null ? 0 : (limit + this.offset) * 
estimatedEntrySize;
+// Both BufferedSortedQueue and SizeBoundQueue won't allocate more 
than thresholdBytes.
+this.estimatedByteSize = limit == null ? 0 : Math.min((limit + 
this.offset) * estimatedEntrySize, thresholdBytes);
 this.pageSizeMs = pageSizeMs;
 }
 


[phoenix] branch master updated (6e1b35d -> 21599ac)

2021-04-05 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 6e1b35d  PHOENIX-6430 Added support for full row update for tables 
when no columns specified in scenario
 add 21599ac  PHOENIX-6436 OrderedResultIterator overestimates memory 
requirements.

No new revisions were added by this update.

Summary of changes:
 .../main/java/org/apache/phoenix/iterate/OrderedResultIterator.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)


[phoenix] branch 5.1 updated: PHOENIX-6424 SELECT cf1.* FAILS with a WHERE clause including cf2.

2021-03-23 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new 2fce483  PHOENIX-6424 SELECT cf1.* FAILS with a WHERE clause including 
cf2.
2fce483 is described below

commit 2fce483df1244957f7fc17a3b5a80f782315ec60
Author: Lars 
AuthorDate: Sun Mar 21 19:14:53 2021 -0700

PHOENIX-6424 SELECT cf1.* FAILS with a WHERE clause including cf2.
---
 .../apache/phoenix/end2end/MultiCfQueryExecIT.java | 65 ++
 .../phoenix/iterate/BaseResultIterators.java   |  4 ++
 .../apache/phoenix/util/EncodedColumnsUtil.java|  2 +-
 3 files changed, 70 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
index 9299f93..a02012b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
@@ -342,6 +342,71 @@ public class MultiCfQueryExecIT extends 
ParallelStatsEnabledIT {
 }
 
 @Test
+public void testCFWildcardProjection() throws Exception {
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+String tableName = generateUniqueName();
+String ddl =
+"CREATE TABLE IF NOT EXISTS " + tableName + " (pk1 INTEGER 
NOT NULL PRIMARY KEY, x.v1 VARCHAR, y.v2 INTEGER)";
+conn.createStatement().execute(ddl);
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 'test', 2)");
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery("SELECT x.* 
FROM "+tableName+" WHERE y.v2 = 2");
+assertTrue(rs.next());
+assertEquals("test", rs.getString(1));
+rs.close();
+
+// make sure this works with a local index as well (only the data 
plan needs to be adjusted)
+conn.createStatement().execute("CREATE LOCAL INDEX " + tableName + 
"_IDX ON " + tableName + "(y.v2)");
+conn.commit();
+
+rs = conn.createStatement().executeQuery("SELECT x.* FROM 
"+tableName+" WHERE y.v2 = 2");
+assertTrue(rs.next());
+assertEquals("test", rs.getString(1));
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT y.* FROM 
"+tableName+" WHERE x.v1 <> 'blah'");
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+rs.close();
+}
+}
+
+@Test
+public void testMultipleCFWildcardProjection() throws Exception {
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+String tableName = generateUniqueName();
+String ddl =
+"CREATE TABLE IF NOT EXISTS " + tableName + " (pk1 INTEGER 
NOT NULL PRIMARY KEY, x.v1 VARCHAR, y.v2 INTEGER, z.v3 INTEGER)";
+conn.createStatement().execute(ddl);
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 'test', 2, 3)");
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery("SELECT x.*, 
z.* FROM "+tableName+" WHERE y.v2 = 2");
+assertTrue(rs.next());
+assertEquals("test", rs.getString(1));
+assertEquals(3, rs.getInt(2));
+rs.close();
+
+// make sure this works with a local index as well (only the data 
plan needs to be adjusted)
+conn.createStatement().execute("CREATE LOCAL INDEX " + tableName + 
"_IDX ON " + tableName + "(y.v2)");
+conn.commit();
+
+rs = conn.createStatement().executeQuery("SELECT x.*, z.* FROM 
"+tableName+" WHERE y.v2 = 2");
+assertTrue(rs.next());
+assertEquals("test", rs.getString(1));
+assertEquals(3, rs.getInt(2));
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT x.*, y.* FROM 
"+tableName+" WHERE z.v3 = 3");
+assertTrue(rs.next());
+assertEquals("test", rs.getString(1));
+assertEquals(2, rs.getInt(2));
+rs.close();
+}
+}
+
+@Test
 public void testMixedDefaultAndExplicitCFs() throws Exception {
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String tableName = generateUniqueName();
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix

[phoenix] branch 4.x updated: PHOENIX-6424 SELECT cf1.* FAILS with a WHERE clause including cf2.

2021-03-23 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 7198196  PHOENIX-6424 SELECT cf1.* FAILS with a WHERE clause including 
cf2.
7198196 is described below

commit 71981960d50120c9b7f4021de8d29802a69df09b
Author: Lars 
AuthorDate: Sun Mar 21 19:14:53 2021 -0700

PHOENIX-6424 SELECT cf1.* FAILS with a WHERE clause including cf2.
---
 .../apache/phoenix/end2end/MultiCfQueryExecIT.java | 65 ++
 .../phoenix/iterate/BaseResultIterators.java   |  4 ++
 .../apache/phoenix/util/EncodedColumnsUtil.java|  2 +-
 3 files changed, 70 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
index 9299f93..a02012b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
@@ -342,6 +342,71 @@ public class MultiCfQueryExecIT extends 
ParallelStatsEnabledIT {
 }
 
 @Test
+public void testCFWildcardProjection() throws Exception {
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+String tableName = generateUniqueName();
+String ddl =
+"CREATE TABLE IF NOT EXISTS " + tableName + " (pk1 INTEGER 
NOT NULL PRIMARY KEY, x.v1 VARCHAR, y.v2 INTEGER)";
+conn.createStatement().execute(ddl);
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 'test', 2)");
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery("SELECT x.* 
FROM "+tableName+" WHERE y.v2 = 2");
+assertTrue(rs.next());
+assertEquals("test", rs.getString(1));
+rs.close();
+
+// make sure this works with a local index as well (only the data 
plan needs to be adjusted)
+conn.createStatement().execute("CREATE LOCAL INDEX " + tableName + 
"_IDX ON " + tableName + "(y.v2)");
+conn.commit();
+
+rs = conn.createStatement().executeQuery("SELECT x.* FROM 
"+tableName+" WHERE y.v2 = 2");
+assertTrue(rs.next());
+assertEquals("test", rs.getString(1));
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT y.* FROM 
"+tableName+" WHERE x.v1 <> 'blah'");
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+rs.close();
+}
+}
+
+@Test
+public void testMultipleCFWildcardProjection() throws Exception {
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+String tableName = generateUniqueName();
+String ddl =
+"CREATE TABLE IF NOT EXISTS " + tableName + " (pk1 INTEGER 
NOT NULL PRIMARY KEY, x.v1 VARCHAR, y.v2 INTEGER, z.v3 INTEGER)";
+conn.createStatement().execute(ddl);
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 'test', 2, 3)");
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery("SELECT x.*, 
z.* FROM "+tableName+" WHERE y.v2 = 2");
+assertTrue(rs.next());
+assertEquals("test", rs.getString(1));
+assertEquals(3, rs.getInt(2));
+rs.close();
+
+// make sure this works with a local index as well (only the data 
plan needs to be adjusted)
+conn.createStatement().execute("CREATE LOCAL INDEX " + tableName + 
"_IDX ON " + tableName + "(y.v2)");
+conn.commit();
+
+rs = conn.createStatement().executeQuery("SELECT x.*, z.* FROM 
"+tableName+" WHERE y.v2 = 2");
+assertTrue(rs.next());
+assertEquals("test", rs.getString(1));
+assertEquals(3, rs.getInt(2));
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT x.*, y.* FROM 
"+tableName+" WHERE z.v3 = 3");
+assertTrue(rs.next());
+assertEquals("test", rs.getString(1));
+assertEquals(2, rs.getInt(2));
+rs.close();
+}
+}
+
+@Test
 public void testMixedDefaultAndExplicitCFs() throws Exception {
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String tableName = generateUniqueName();
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix

[phoenix] branch master updated (e1f87f3 -> 2a2d996)

2021-03-23 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from e1f87f3  PHOENIX-6417 Fix PHERF ITs that are failing in the local 
builds
 add 2a2d996  PHOENIX-6424 SELECT cf1.* FAILS with a WHERE clause including 
cf2.

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/MultiCfQueryExecIT.java | 65 ++
 .../phoenix/iterate/BaseResultIterators.java   |  4 ++
 .../apache/phoenix/util/EncodedColumnsUtil.java|  2 +-
 3 files changed, 70 insertions(+), 1 deletion(-)


[phoenix] branch 4.x updated: PHOENIX-6421 Selecting an indexed array value from an uncovered column with local index returns NULL.

2021-03-22 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 410f738  PHOENIX-6421 Selecting an indexed array value from an 
uncovered column with local index returns NULL.
410f738 is described below

commit 410f738f6f3824feb7fec5b5486b2993be15a5ef
Author: Lars 
AuthorDate: Fri Mar 19 19:00:17 2021 -0700

PHOENIX-6421 Selecting an indexed array value from an uncovered column with 
local index returns NULL.
---
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 30 ++
 .../apache/phoenix/compile/ProjectionCompiler.java |  6 +
 .../phoenix/iterate/RegionScannerFactory.java  | 18 +
 3 files changed, 43 insertions(+), 11 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 9f79107..2910322 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -83,6 +83,36 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 }
 
 @Test
+public void testSelectFromIndexWithUncoveredArrayIndex() throws Exception {
+if (isNamespaceMapped) {
+return;
+}
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+
+Connection conn = getConnection();
+conn.setAutoCommit(true);
+
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT, v2 FLOAT[])");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 2, ARRAY[3,4])");
+
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1)");
+
+ResultSet rs = conn.createStatement().executeQuery("SELECT v2[1] FROM 
"+tableName+" WHERE v1 < 3");
+rs.next();
+assertEquals(3, rs.getInt(1));
+rs.close();
+
+conn.createStatement().execute("DROP INDEX " + indexName + " ON " + 
tableName);
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v2[2])");
+
+rs = conn.createStatement().executeQuery("SELECT v2[1] FROM 
"+tableName+" WHERE v2[2] < 5");
+rs.next();
+assertEquals(3, rs.getInt(1));
+rs.close();
+}
+
+@Test
 public void testSelectFromIndexWithAdditionalWhereClause() throws 
Exception {
 if (isNamespaceMapped) {
 return;
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
index 799b667..13ca41b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
@@ -81,6 +81,7 @@ import 
org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.ProjectedColumn;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.TableRef;
@@ -699,6 +700,11 @@ public class ProjectionCompiler {
  if (expression.getDataType().isArrayType()) {
  indexProjectedColumns.add(expression);
  PColumn col = expression.getColumn();
+ // hack'ish... For covered columns with local 
indexes we defer to the server.
+ if (col instanceof ProjectedColumn && 
((ProjectedColumn) col)
+ .getSourceColumnRef() instanceof 
LocalIndexDataColumnRef) {
+ return null;
+ }
  PTable table = 
context.getCurrentTable().getTable();
  KeyValueColumnExpression keyValueColumnExpression;
  if (table.getImmutableStorageScheme() != 
ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
index 414f294..2dd37a8 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
+++ 
b/phoenix-cor

[phoenix] branch 5.1 updated: PHOENIX-6421 Selecting an indexed array value from an uncovered column with local index returns NULL.

2021-03-22 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new 08f45ac  PHOENIX-6421 Selecting an indexed array value from an 
uncovered column with local index returns NULL.
08f45ac is described below

commit 08f45acfd44b513b1ce0bb8c3e2e3c49b0b269e1
Author: Lars 
AuthorDate: Fri Mar 19 19:00:17 2021 -0700

PHOENIX-6421 Selecting an indexed array value from an uncovered column with 
local index returns NULL.
---
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 30 ++
 .../apache/phoenix/compile/ProjectionCompiler.java |  6 +
 .../phoenix/iterate/RegionScannerFactory.java  | 18 +
 3 files changed, 43 insertions(+), 11 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 279c2e7..64a85eb 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -84,6 +84,36 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 }
 
 @Test
+public void testSelectFromIndexWithUncoveredArrayIndex() throws Exception {
+if (isNamespaceMapped) {
+return;
+}
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+
+Connection conn = getConnection();
+conn.setAutoCommit(true);
+
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT, v2 FLOAT[])");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 2, ARRAY[3,4])");
+
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1)");
+
+ResultSet rs = conn.createStatement().executeQuery("SELECT v2[1] FROM 
"+tableName+" WHERE v1 < 3");
+rs.next();
+assertEquals(3, rs.getInt(1));
+rs.close();
+
+conn.createStatement().execute("DROP INDEX " + indexName + " ON " + 
tableName);
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v2[2])");
+
+rs = conn.createStatement().executeQuery("SELECT v2[1] FROM 
"+tableName+" WHERE v2[2] < 5");
+rs.next();
+assertEquals(3, rs.getInt(1));
+rs.close();
+}
+
+@Test
 public void testSelectFromIndexWithAdditionalWhereClause() throws 
Exception {
 if (isNamespaceMapped) {
 return;
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
index bae0f0c..c13f383 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
@@ -81,6 +81,7 @@ import 
org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.ProjectedColumn;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.TableRef;
@@ -699,6 +700,11 @@ public class ProjectionCompiler {
  if (expression.getDataType().isArrayType()) {
  indexProjectedColumns.add(expression);
  PColumn col = expression.getColumn();
+ // hack'ish... For covered columns with local 
indexes we defer to the server.
+ if (col instanceof ProjectedColumn && 
((ProjectedColumn) col)
+ .getSourceColumnRef() instanceof 
LocalIndexDataColumnRef) {
+ return null;
+ }
  PTable table = 
context.getCurrentTable().getTable();
  KeyValueColumnExpression keyValueColumnExpression;
  if (table.getImmutableStorageScheme() != 
ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
index 8d70ab0..a313fe7 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
+++ 
b/phoenix-cor

[phoenix] branch master updated (b5bea88 -> 84d91ff)

2021-03-22 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from b5bea88  PHOENIX-6423 Wildcard queries fail with mixed default and 
explicit column families.
 add 84d91ff  PHOENIX-6421 Selecting an indexed array value from an 
uncovered column with local index returns NULL.

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 30 ++
 .../apache/phoenix/compile/ProjectionCompiler.java |  6 +
 .../phoenix/iterate/RegionScannerFactory.java  | 18 +
 3 files changed, 43 insertions(+), 11 deletions(-)


[phoenix] branch 4.x updated: PHOENIX-6423 Wildcard queries fail with mixed default and explicit column families.

2021-03-21 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 9cfb638  PHOENIX-6423 Wildcard queries fail with mixed default and 
explicit column families.
9cfb638 is described below

commit 9cfb63819f85306a406efaea8b326fc080cc5196
Author: Lars 
AuthorDate: Sat Mar 20 13:19:14 2021 -0700

PHOENIX-6423 Wildcard queries fail with mixed default and explicit column 
families.
---
 .../apache/phoenix/end2end/MultiCfQueryExecIT.java | 26 ++
 .../apache/phoenix/compile/ProjectionCompiler.java |  5 +
 2 files changed, 31 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
index 01da2d8..9299f93 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
@@ -342,6 +342,32 @@ public class MultiCfQueryExecIT extends 
ParallelStatsEnabledIT {
 }
 
 @Test
+public void testMixedDefaultAndExplicitCFs() throws Exception {
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+String tableName = generateUniqueName();
+String ddl =
+"CREATE TABLE IF NOT EXISTS " + tableName + " (pk1 INTEGER 
NOT NULL PRIMARY KEY, v1 VARCHAR, y.v1 INTEGER)";
+conn.createStatement().execute(ddl);
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 'test', 2)");
+conn.commit();
+ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM 
"+tableName);
+assertTrue(rs.next());
+// Without PHOENIX-6423 this would throw a type mismatch 
exception, because it would confuse the 3rd
+// column to also be the VARCHAR column.
+assertEquals(2, rs.getInt(3));
+rs.close();
+
+// make sure this works with a local index as well (only the data 
plan needs to be adjusted)
+conn.createStatement().execute("CREATE LOCAL INDEX " + tableName + 
"_IDX ON " + tableName + "(v1)");
+conn.commit();
+rs = conn.createStatement().executeQuery("SELECT * FROM 
"+tableName);
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(3));
+rs.close();
+}
+}
+
+@Test
 public void testBug3890() throws Exception {
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String tableName = generateUniqueName();
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
index b833849..799b667 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
@@ -159,6 +159,11 @@ public class ProjectionCompiler {
 String schemaName = table.getSchemaName().getString();
 ref = resolver.resolveColumn(schemaName.length() == 0 
? null : schemaName, table.getTableName().getString(), colName);
 }
+// The freshly resolved column's family better be the same 
as the original one.
+// If not, trigger the disambiguation logic. Also see 
PTableImpl.getColumnForColumnName(...)
+if (column.getFamilyName() != null && 
!column.getFamilyName().equals(ref.getColumn().getFamilyName())) {
+throw new AmbiguousColumnException();
+}
 } catch (AmbiguousColumnException e) {
 if (column.getFamilyName() != null) {
 ref = resolver.resolveColumn(tableAlias != null ? 
tableAlias : table.getTableName().getString(), 
column.getFamilyName().getString(), colName);


[phoenix] branch 5.1 updated: PHOENIX-6423 Wildcard queries fail with mixed default and explicit column families.

2021-03-21 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new 99c0cc4  PHOENIX-6423 Wildcard queries fail with mixed default and 
explicit column families.
99c0cc4 is described below

commit 99c0cc49f674d67e11243f80d7d22bed3dc56ab7
Author: Lars 
AuthorDate: Sat Mar 20 13:19:14 2021 -0700

PHOENIX-6423 Wildcard queries fail with mixed default and explicit column 
families.
---
 .../apache/phoenix/end2end/MultiCfQueryExecIT.java | 26 ++
 .../apache/phoenix/compile/ProjectionCompiler.java |  5 +
 2 files changed, 31 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
index 01da2d8..9299f93 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
@@ -342,6 +342,32 @@ public class MultiCfQueryExecIT extends 
ParallelStatsEnabledIT {
 }
 
 @Test
+public void testMixedDefaultAndExplicitCFs() throws Exception {
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+String tableName = generateUniqueName();
+String ddl =
+"CREATE TABLE IF NOT EXISTS " + tableName + " (pk1 INTEGER 
NOT NULL PRIMARY KEY, v1 VARCHAR, y.v1 INTEGER)";
+conn.createStatement().execute(ddl);
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 'test', 2)");
+conn.commit();
+ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM 
"+tableName);
+assertTrue(rs.next());
+// Without PHOENIX-6423 this would throw a type mismatch 
exception, because it would confuse the 3rd
+// column to also be the VARCHAR column.
+assertEquals(2, rs.getInt(3));
+rs.close();
+
+// make sure this works with a local index as well (only the data 
plan needs to be adjusted)
+conn.createStatement().execute("CREATE LOCAL INDEX " + tableName + 
"_IDX ON " + tableName + "(v1)");
+conn.commit();
+rs = conn.createStatement().executeQuery("SELECT * FROM 
"+tableName);
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(3));
+rs.close();
+}
+}
+
+@Test
 public void testBug3890() throws Exception {
 try (Connection conn = DriverManager.getConnection(getUrl())) {
 String tableName = generateUniqueName();
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
index 31af76a..bae0f0c 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
@@ -159,6 +159,11 @@ public class ProjectionCompiler {
 String schemaName = table.getSchemaName().getString();
 ref = resolver.resolveColumn(schemaName.length() == 0 
? null : schemaName, table.getTableName().getString(), colName);
 }
+// The freshly resolved column's family better be the same 
as the original one.
+// If not, trigger the disambiguation logic. Also see 
PTableImpl.getColumnForColumnName(...)
+if (column.getFamilyName() != null && 
!column.getFamilyName().equals(ref.getColumn().getFamilyName())) {
+throw new AmbiguousColumnException();
+}
 } catch (AmbiguousColumnException e) {
 if (column.getFamilyName() != null) {
 ref = resolver.resolveColumn(tableAlias != null ? 
tableAlias : table.getTableName().getString(), 
column.getFamilyName().getString(), colName);


[phoenix] branch master updated (888c249 -> b5bea88)

2021-03-21 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 888c249  PHOENIX-6413 Having cannot resolve alias (#1168)
 add b5bea88  PHOENIX-6423 Wildcard queries fail with mixed default and 
explicit column families.

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/MultiCfQueryExecIT.java | 26 ++
 .../apache/phoenix/compile/ProjectionCompiler.java |  5 +
 2 files changed, 31 insertions(+)


[phoenix] branch 4.x updated: PHOENIX-6409 Include local index uncovered columns merge in explain plan.

2021-03-13 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 9991da3  PHOENIX-6409 Include local index uncovered columns merge in 
explain plan.
9991da3 is described below

commit 9991da34346c815dafc3b578e587ee80c9afb7d3
Author: Lars 
AuthorDate: Thu Mar 11 13:25:35 2021 -0800

PHOENIX-6409 Include local index uncovered columns merge in explain plan.
---
 .../phoenix/end2end/CostBasedDecisionIT.java   |  4 
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 13 
 .../phoenix/end2end/index/MutableIndexIT.java  |  1 +
 .../apache/phoenix/end2end/index/ViewIndexIT.java  |  1 +
 .../phoenix/end2end/join/HashJoinLocalIndexIT.java |  3 +++
 .../phoenix/compile/ExplainPlanAttributes.java | 23 --
 .../org/apache/phoenix/iterate/ExplainTable.java   |  9 +
 7 files changed, 52 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CostBasedDecisionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CostBasedDecisionIT.java
index e2f5cfb..15414a8 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CostBasedDecisionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CostBasedDecisionIT.java
@@ -361,10 +361,12 @@ public class CostBasedDecisionIT extends 
BaseUniqueNamesOwnClusterIT {
 verifyQueryPlan(query,
 "UNION ALL OVER 2 QUERIES\n" +
 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + 
" [1]\n" +
+"SERVER MERGE [0.C2]\n" +
 "SERVER FILTER BY FIRST KEY ONLY AND \"ROWKEY\" <= 
'z'\n" +
 "SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY 
[\"C1\"]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + 
" [1]\n" +
+"SERVER MERGE [0.C2]\n" +
 "SERVER FILTER BY FIRST KEY ONLY AND \"ROWKEY\" >= 
'a'\n" +
 "SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY 
[\"C1\"]\n" +
 "CLIENT MERGE SORT");
@@ -413,11 +415,13 @@ public class CostBasedDecisionIT extends 
BaseUniqueNamesOwnClusterIT {
 // Use the optimal plan based on cost when stats become available.
 verifyQueryPlan(query,
 "CLIENT PARALLEL 626-WAY RANGE SCAN OVER " + tableName + " 
[1,'X0'] - [1,'X1']\n" +
+"SERVER MERGE [0.C2]\n" +
 "SERVER FILTER BY FIRST KEY ONLY\n" +
 "SERVER SORTED BY [\"T1.:ROWKEY\"]\n" +
 "CLIENT MERGE SORT\n" +
 "PARALLEL INNER-JOIN TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + 
tableName + " [1]\n" +
+"SERVER MERGE [0.C2]\n" +
 "SERVER FILTER BY FIRST KEY ONLY AND 
\"ROWKEY\" <= 'z'\n" +
 "SERVER AGGREGATE INTO ORDERED DISTINCT ROWS 
BY [\"C1\"]\n" +
 "CLIENT MERGE SORT\n" +
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 2de2c94..9bff734 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -308,6 +308,7 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 assertEquals(
 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER "
 + physicalTableName + " [1,3,4,3]\n"
++ "SERVER MERGE [0.V3]\n"
 + "SERVER FILTER BY FIRST KEY ONLY AND \"V3\" = 1\n"
 + "CLIENT MERGE SORT",
 QueryUtil.getExplainPlan(rs));
@@ -337,6 +338,7 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 assertEquals(
 "CLIENT PARALLEL 16-WAY RANGE SCAN OVER "
 + indexPhysicalTableName + " [1,2,3]\n"
++ "SERVER MERGE [0.V1]\n"
 + "SERVER FILTER BY FIRST KEY ONLY\n"
 + "CLIENT MERGE S

[phoenix] branch 5.1 updated: PHOENIX-6409 Include local index uncovered columns merge in explain plan.

2021-03-13 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new c8f30e8  PHOENIX-6409 Include local index uncovered columns merge in 
explain plan.
c8f30e8 is described below

commit c8f30e89f63d16634e4ba686c3938c44879bb537
Author: Lars 
AuthorDate: Thu Mar 11 13:25:35 2021 -0800

PHOENIX-6409 Include local index uncovered columns merge in explain plan.
---
 .../phoenix/end2end/CostBasedDecisionIT.java   |  4 
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 13 
 .../phoenix/end2end/index/MutableIndexIT.java  |  1 +
 .../apache/phoenix/end2end/index/ViewIndexIT.java  |  1 +
 .../phoenix/end2end/join/HashJoinLocalIndexIT.java |  3 +++
 .../phoenix/compile/ExplainPlanAttributes.java | 23 --
 .../org/apache/phoenix/iterate/ExplainTable.java   |  9 +
 7 files changed, 52 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CostBasedDecisionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CostBasedDecisionIT.java
index c313393..f95215a 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CostBasedDecisionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CostBasedDecisionIT.java
@@ -361,10 +361,12 @@ public class CostBasedDecisionIT extends 
BaseUniqueNamesOwnClusterIT {
 verifyQueryPlan(query,
 "UNION ALL OVER 2 QUERIES\n" +
 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + 
" [1]\n" +
+"SERVER MERGE [0.C2]\n" +
 "SERVER FILTER BY FIRST KEY ONLY AND \"ROWKEY\" <= 
'z'\n" +
 "SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY 
[\"C1\"]\n" +
 "CLIENT MERGE SORT\n" +
 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + 
" [1]\n" +
+"SERVER MERGE [0.C2]\n" +
 "SERVER FILTER BY FIRST KEY ONLY AND \"ROWKEY\" >= 
'a'\n" +
 "SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY 
[\"C1\"]\n" +
 "CLIENT MERGE SORT");
@@ -413,11 +415,13 @@ public class CostBasedDecisionIT extends 
BaseUniqueNamesOwnClusterIT {
 // Use the optimal plan based on cost when stats become available.
 verifyQueryPlan(query,
 "CLIENT PARALLEL 626-WAY RANGE SCAN OVER " + tableName + " 
[1,'X0'] - [1,'X1']\n" +
+"SERVER MERGE [0.C2]\n" +
 "SERVER FILTER BY FIRST KEY ONLY\n" +
 "SERVER SORTED BY [\"T1.:ROWKEY\"]\n" +
 "CLIENT MERGE SORT\n" +
 "PARALLEL INNER-JOIN TABLE 0\n" +
 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + 
tableName + " [1]\n" +
+"SERVER MERGE [0.C2]\n" +
 "SERVER FILTER BY FIRST KEY ONLY AND 
\"ROWKEY\" <= 'z'\n" +
 "SERVER AGGREGATE INTO ORDERED DISTINCT ROWS 
BY [\"C1\"]\n" +
 "CLIENT MERGE SORT\n" +
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 0a071dd..3b1c554 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -309,6 +309,7 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 assertEquals(
 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER "
 + physicalTableName + " [1,3,4,3]\n"
++ "SERVER MERGE [0.V3]\n"
 + "SERVER FILTER BY FIRST KEY ONLY AND \"V3\" = 1\n"
 + "CLIENT MERGE SORT",
 QueryUtil.getExplainPlan(rs));
@@ -338,6 +339,7 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 assertEquals(
 "CLIENT PARALLEL 16-WAY RANGE SCAN OVER "
 + indexPhysicalTableName + " [1,2,3]\n"
++ "SERVER MERGE [0.V1]\n"
 + "SERVER FILTER BY FIRST KEY ONLY\n"
 + "CLIENT MERGE S

[phoenix] branch master updated (868c8ef -> e0c500d)

2021-03-13 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 868c8ef  PHOENIX-6385 : For non-small Scan, not to use Scan#setSmall 
for HBase 2.x versions (#1167)
 add e0c500d  PHOENIX-6409 Include local index uncovered columns merge in 
explain plan.

No new revisions were added by this update.

Summary of changes:
 .../phoenix/end2end/CostBasedDecisionIT.java   |  4 
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 13 
 .../phoenix/end2end/index/MutableIndexIT.java  |  1 +
 .../apache/phoenix/end2end/index/ViewIndexIT.java  |  1 +
 .../phoenix/end2end/join/HashJoinLocalIndexIT.java |  3 +++
 .../phoenix/compile/ExplainPlanAttributes.java | 23 --
 .../org/apache/phoenix/iterate/ExplainTable.java   |  9 +
 7 files changed, 52 insertions(+), 2 deletions(-)



[phoenix] branch 4.x updated: PHOENIX-6408 LIMIT on local index query with uncovered columns in the WHERE returns wrong result.

2021-03-10 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 61dd12f  PHOENIX-6408 LIMIT on local index query with uncovered 
columns in the WHERE returns wrong result.
61dd12f is described below

commit 61dd12f0f71cf2c8f1ff2ed9bffaa0d71548b9d9
Author: Lars 
AuthorDate: Tue Mar 9 19:36:48 2021 -0800

PHOENIX-6408 LIMIT on local index query with uncovered columns in the WHERE 
returns wrong result.
---
 .../org/apache/phoenix/end2end/index/LocalIndexIT.java  | 17 +
 .../phoenix/coprocessor/BaseScannerRegionObserver.java  |  1 +
 .../org/apache/phoenix/iterate/BaseResultIterators.java |  9 -
 .../java/org/apache/phoenix/iterate/ExplainTable.java   | 11 ++-
 .../apache/phoenix/iterate/RegionScannerFactory.java|  8 
 5 files changed, 44 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index a3f3ed1..2de2c94 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -152,6 +152,23 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 rs.next();
 assertEquals(6, rs.getInt(1));
 rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT * FROM "+tableName+" 
WHERE v1 > 0 AND v3 > 5 LIMIT 2");
+assertTrue(rs.next());
+assertTrue(rs.next());
+assertFalse(rs.next());
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT * FROM "+tableName+" 
WHERE v1 > 0 AND v3 > 5 LIMIT 1");
+assertTrue(rs.next());
+assertFalse(rs.next());
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT * FROM "+tableName+" 
WHERE v3 > 5 ORDER BY v1 LIMIT 2");
+assertTrue(rs.next());
+assertTrue(rs.next());
+assertFalse(rs.next());
+rs.close();
 }
 
 @Test
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 7bb84f7..b4c204b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -98,6 +98,7 @@ abstract public class BaseScannerRegionObserver extends 
BaseRegionObserver {
 public static final String 
INDEX_REBUILD_DISABLE_LOGGING_BEYOND_MAXLOOKBACK_AGE =
 "_IndexRebuildDisableLoggingBeyondMaxLookbackAge";
 public static final String LOCAL_INDEX_FILTER = "_LocalIndexFilter";
+public static final String LOCAL_INDEX_LIMIT = "_LocalIndexLimit";
 public static final String LOCAL_INDEX_FILTER_STR = "_LocalIndexFilterStr";
 
 /* 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 9190a33..882cfaa 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -281,7 +281,14 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 }
 
 if (perScanLimit != null) {
-ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit));
+if 
(scan.getAttribute(BaseScannerRegionObserver.LOCAL_INDEX_FILTER) == null) {
+ScanUtil.andFilterAtEnd(scan, new 
PageFilter(perScanLimit));
+} else {
+// if we have a local index filter and a limit, handle the 
limit after the filter
+// we cast the limit to a long even though it is passed as an 
Integer so that
+// if we need to extend this in the future the serialization 
is unchanged
+
scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_LIMIT, 
Bytes.toBytes((long)perScanLimit));
+}
 }
 
 if(offset!=null){
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
index cf5e021..163364c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
@@ -226,8 +226,17 @@ public abstract class ExplainTable {
 if (offset != n

[phoenix] branch 5.1 updated: PHOENIX-6408 LIMIT on local index query with uncovered columns in the WHERE returns wrong result.

2021-03-10 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new 148800c  PHOENIX-6408 LIMIT on local index query with uncovered 
columns in the WHERE returns wrong result.
148800c is described below

commit 148800c70b44147c28a2014a9b88ecda7d567ea8
Author: Lars 
AuthorDate: Tue Mar 9 19:36:48 2021 -0800

PHOENIX-6408 LIMIT on local index query with uncovered columns in the WHERE 
returns wrong result.
---
 .../org/apache/phoenix/end2end/index/LocalIndexIT.java  | 17 +
 .../phoenix/coprocessor/BaseScannerRegionObserver.java  |  1 +
 .../org/apache/phoenix/iterate/BaseResultIterators.java |  9 -
 .../java/org/apache/phoenix/iterate/ExplainTable.java   | 11 ++-
 .../apache/phoenix/iterate/RegionScannerFactory.java|  8 
 5 files changed, 44 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 90d172e..0a071dd 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -153,6 +153,23 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 rs.next();
 assertEquals(6, rs.getInt(1));
 rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT * FROM "+tableName+" 
WHERE v1 > 0 AND v3 > 5 LIMIT 2");
+assertTrue(rs.next());
+assertTrue(rs.next());
+assertFalse(rs.next());
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT * FROM "+tableName+" 
WHERE v1 > 0 AND v3 > 5 LIMIT 1");
+assertTrue(rs.next());
+assertFalse(rs.next());
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT * FROM "+tableName+" 
WHERE v3 > 5 ORDER BY v1 LIMIT 2");
+assertTrue(rs.next());
+assertTrue(rs.next());
+assertFalse(rs.next());
+rs.close();
 }
 
 @Test
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 1244276..c7dfe9a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -81,6 +81,7 @@ abstract public class BaseScannerRegionObserver extends 
CompatBaseScannerRegionO
 public static final String 
INDEX_REBUILD_DISABLE_LOGGING_BEYOND_MAXLOOKBACK_AGE =
 "_IndexRebuildDisableLoggingBeyondMaxLookbackAge";
 public static final String LOCAL_INDEX_FILTER = "_LocalIndexFilter";
+public static final String LOCAL_INDEX_LIMIT = "_LocalIndexLimit";
 public static final String LOCAL_INDEX_FILTER_STR = "_LocalIndexFilterStr";
 
 /* 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index ac8cce4..68c00a0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -280,7 +280,14 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 }
 
 if (perScanLimit != null) {
-ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit));
+if 
(scan.getAttribute(BaseScannerRegionObserver.LOCAL_INDEX_FILTER) == null) {
+ScanUtil.andFilterAtEnd(scan, new 
PageFilter(perScanLimit));
+} else {
+// if we have a local index filter and a limit, handle the 
limit after the filter
+// we cast the limit to a long even though it is passed as an 
Integer so that
+// if we need to extend this in the future the serialization 
is unchanged
+
scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_LIMIT, 
Bytes.toBytes((long)perScanLimit));
+}
 }
 
 if(offset!=null){
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
index cf5e021..163364c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
@@ -226,8 +226,17 @@ public abstract class ExplainTable {
 if (offset != n

[phoenix] branch master updated (b6b41ce -> dcf3bf9)

2021-03-10 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from b6b41ce  PHOENIX-6376 Update MetaDataProtocol.java for Phoenix 5.2
 add dcf3bf9  PHOENIX-6408 LIMIT on local index query with uncovered 
columns in the WHERE returns wrong result.

No new revisions were added by this update.

Summary of changes:
 .../org/apache/phoenix/end2end/index/LocalIndexIT.java  | 17 +
 .../phoenix/coprocessor/BaseScannerRegionObserver.java  |  1 +
 .../org/apache/phoenix/iterate/BaseResultIterators.java |  9 -
 .../java/org/apache/phoenix/iterate/ExplainTable.java   | 11 ++-
 .../apache/phoenix/iterate/RegionScannerFactory.java|  8 
 5 files changed, 44 insertions(+), 2 deletions(-)



[phoenix] branch 4.x updated: PHOENIX-6402 Allow using local indexes with uncovered columns in the WHERE clause.

2021-03-09 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 78a5ab0  PHOENIX-6402 Allow using local indexes with uncovered columns 
in the WHERE clause.
78a5ab0 is described below

commit 78a5ab06bb382a04974ca93c7a33aff0867c7132
Author: Lars 
AuthorDate: Sat Mar 6 12:51:14 2021 -0800

PHOENIX-6402 Allow using local indexes with uncovered columns in the WHERE 
clause.
---
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 84 ++
 .../org/apache/phoenix/compile/WhereCompiler.java  | 35 ++---
 .../coprocessor/BaseScannerRegionObserver.java |  2 +
 .../phoenix/iterate/BaseResultIterators.java   | 33 -
 .../org/apache/phoenix/iterate/ExplainTable.java   | 11 ++-
 .../phoenix/iterate/OrderedResultIterator.java |  5 ++
 .../phoenix/iterate/RegionScannerFactory.java  | 35 +
 .../apache/phoenix/schema/types/PVarbinary.java|  2 +-
 8 files changed, 165 insertions(+), 42 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 276395e..a3f3ed1 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -81,22 +81,77 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 
 @Test
 public void testSelectFromIndexWithAdditionalWhereClause() throws 
Exception {
+if (isNamespaceMapped) {
+return;
+}
 String tableName = schemaName + "." + generateUniqueName();
 String indexName = "IDX_" + generateUniqueName();
 
 Connection conn = getConnection();
 conn.setAutoCommit(true);
-if (isNamespaceMapped) {
-conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + 
schemaName);
-}
 
-conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT, v2 FLOAT)");
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT, v2 FLOAT, v3 INTEGER)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 2, 3, 4)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(2, 3, 4, 5)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(3, 4, 5, 6)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(4, 5, 6, 7)");
+
 conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1)");
-conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 0.01, 1.0)");
-ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) 
FROM "+tableName+" WHERE v1 < 0.1 and v2 < 10.0");
+testExtraWhere(conn, tableName);
+
+conn.createStatement().execute("DROP INDEX " + indexName + " ON " + 
tableName);
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1) INCLUDE (v3)");
+testExtraWhere(conn, tableName);
+
+conn.createStatement().execute("DROP INDEX " + indexName + " ON " + 
tableName);
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1) INCLUDE (v2)");
+testExtraWhere(conn, tableName);
+
+conn.createStatement().execute("DROP INDEX " + indexName + " ON " + 
tableName);
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1) INCLUDE (v2,v3)");
+testExtraWhere(conn, tableName);
+}
+
+private void testExtraWhere(Connection conn, String tableName) throws 
SQLException {
+ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) 
FROM "+tableName+" WHERE v1 < 3 AND v2 < 4");
+rs.next();
+assertEquals(1, rs.getInt(1));
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM 
"+tableName+" WHERE v1 < 3 AND v3 < 5");
+rs.next();
+assertEquals(1, rs.getInt(1));
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM 
"+tableName+" WHERE v1 < 10 AND v2 < 0 AND v3 < 0");
+rs.next();
+assertEquals(0, rs.

[phoenix] branch 5.1 updated: PHOENIX-6402 Allow using local indexes with uncovered columns in the WHERE clause.

2021-03-09 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new 0bbdfda  PHOENIX-6402 Allow using local indexes with uncovered columns 
in the WHERE clause.
0bbdfda is described below

commit 0bbdfdae9a7d479de081b9b7511df96fa9cc5829
Author: Lars 
AuthorDate: Sat Mar 6 12:51:14 2021 -0800

PHOENIX-6402 Allow using local indexes with uncovered columns in the WHERE 
clause.
---
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 84 ++
 .../org/apache/phoenix/compile/WhereCompiler.java  | 35 ++---
 .../coprocessor/BaseScannerRegionObserver.java |  2 +
 .../phoenix/iterate/BaseResultIterators.java   | 33 -
 .../org/apache/phoenix/iterate/ExplainTable.java   | 11 ++-
 .../phoenix/iterate/OrderedResultIterator.java |  5 ++
 .../phoenix/iterate/RegionScannerFactory.java  | 35 +
 .../apache/phoenix/schema/types/PVarbinary.java|  2 +-
 8 files changed, 165 insertions(+), 42 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 495a0a5..90d172e 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -82,22 +82,77 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 
 @Test
 public void testSelectFromIndexWithAdditionalWhereClause() throws 
Exception {
+if (isNamespaceMapped) {
+return;
+}
 String tableName = schemaName + "." + generateUniqueName();
 String indexName = "IDX_" + generateUniqueName();
 
 Connection conn = getConnection();
 conn.setAutoCommit(true);
-if (isNamespaceMapped) {
-conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + 
schemaName);
-}
 
-conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT, v2 FLOAT)");
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT, v2 FLOAT, v3 INTEGER)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 2, 3, 4)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(2, 3, 4, 5)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(3, 4, 5, 6)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(4, 5, 6, 7)");
+
 conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1)");
-conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 0.01, 1.0)");
-ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) 
FROM "+tableName+" WHERE v1 < 0.1 and v2 < 10.0");
+testExtraWhere(conn, tableName);
+
+conn.createStatement().execute("DROP INDEX " + indexName + " ON " + 
tableName);
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1) INCLUDE (v3)");
+testExtraWhere(conn, tableName);
+
+conn.createStatement().execute("DROP INDEX " + indexName + " ON " + 
tableName);
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1) INCLUDE (v2)");
+testExtraWhere(conn, tableName);
+
+conn.createStatement().execute("DROP INDEX " + indexName + " ON " + 
tableName);
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1) INCLUDE (v2,v3)");
+testExtraWhere(conn, tableName);
+}
+
+private void testExtraWhere(Connection conn, String tableName) throws 
SQLException {
+ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) 
FROM "+tableName+" WHERE v1 < 3 AND v2 < 4");
+rs.next();
+assertEquals(1, rs.getInt(1));
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM 
"+tableName+" WHERE v1 < 3 AND v3 < 5");
+rs.next();
+assertEquals(1, rs.getInt(1));
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM 
"+tableName+" WHERE v1 < 10 AND v2 < 0 AND v3 < 0");
+rs.next();
+assertEquals(0, rs.

[phoenix] branch master updated (d161867 -> 5d78494)

2021-03-09 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from d161867  PHOENIX-6400 Do not use local index with uncovered columns in 
the WHERE clause.
 add 5d78494  PHOENIX-6402 Allow using local indexes with uncovered columns 
in the WHERE clause.

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 84 ++
 .../org/apache/phoenix/compile/WhereCompiler.java  | 35 ++---
 .../coprocessor/BaseScannerRegionObserver.java |  2 +
 .../phoenix/iterate/BaseResultIterators.java   | 33 -
 .../org/apache/phoenix/iterate/ExplainTable.java   | 11 ++-
 .../phoenix/iterate/OrderedResultIterator.java |  5 ++
 .../phoenix/iterate/RegionScannerFactory.java  | 35 +
 .../apache/phoenix/schema/types/PVarbinary.java|  2 +-
 8 files changed, 165 insertions(+), 42 deletions(-)



[phoenix] branch 4.x updated: PHOENIX-6400 Do not use local index with uncovered columns in the WHERE clause.

2021-03-04 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new aee3f83  PHOENIX-6400 Do not use local index with uncovered columns in 
the WHERE clause.
aee3f83 is described below

commit aee3f832af0fd128f9a1008fec8e53e59d30c6bc
Author: Lars 
AuthorDate: Wed Mar 3 13:01:07 2021 -0800

PHOENIX-6400 Do not use local index with uncovered columns in the WHERE 
clause.
---
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 37 --
 .../org/apache/phoenix/compile/WhereCompiler.java  | 11 +++
 2 files changed, 39 insertions(+), 9 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 14e85ab..276395e 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -80,6 +80,26 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 }
 
 @Test
+public void testSelectFromIndexWithAdditionalWhereClause() throws 
Exception {
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+
+Connection conn = getConnection();
+conn.setAutoCommit(true);
+if (isNamespaceMapped) {
+conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + 
schemaName);
+}
+
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT, v2 FLOAT)");
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 0.01, 1.0)");
+ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) 
FROM "+tableName+" WHERE v1 < 0.1 and v2 < 10.0");
+rs.next();
+assertEquals(1, rs.getInt(1));
+rs.close();
+}
+
+@Test
 public void testDeleteFromLocalIndex() throws Exception {
 String tableName = schemaName + "." + generateUniqueName();
 String indexName = "IDX_" + generateUniqueName();
@@ -211,13 +231,13 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 QueryUtil.getExplainPlan(rs));
 rs.close();
 
-// 4. Longer prefix on the index, use it.
+// 4. Longer prefix on the index.
+// Note: This cannot use the local index, see PHOENIX-6300
 rs = conn.createStatement().executeQuery("EXPLAIN SELECT v2 FROM " + 
tableName + " WHERE pk1 = 3 AND pk2 = 4 AND v1 = 3 AND v3 = 1");
 assertEquals(
 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER "
-+ physicalTableName + " [1,3,4,3]\n"
-+ "SERVER FILTER BY FIRST KEY ONLY AND \"V3\" = 1\n"
-+ "CLIENT MERGE SORT",
++ physicalTableName + " [3,4]\n"
++ "SERVER FILTER BY (V1 = 3.0 AND V3 = 1)",
 QueryUtil.getExplainPlan(rs));
 rs.close();
 }
@@ -353,13 +373,12 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 QueryUtil.getExplainPlan(rs));
 rs.close();
 
-// 10. Use index even when also filtering on non-indexed column
+// 10. Cannot use index even when also filtering on non-indexed 
column, see PHOENIX-6400
 rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + 
tableName + " WHERE v2 = 2 AND v1 = 3");
 assertEquals(
-"CLIENT PARALLEL 1-WAY RANGE SCAN OVER "
-+ indexPhysicalTableName + " [1,2]\n"
-+ "SERVER FILTER BY FIRST KEY ONLY AND \"V1\" 
= 3.0\n"
-+ "CLIENT MERGE SORT",
+"CLIENT PARALLEL 1-WAY FULL SCAN OVER "
++ indexPhysicalTableName + "\n"
++ "SERVER FILTER BY (V2 = 2.0 AND V1 = 3.0)",
 QueryUtil.getExplainPlan(rs));
 rs.close();
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
index a5fd6c3..558be36 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
@@ -59,

[phoenix] branch 5.1 updated: PHOENIX-6400 Do not use local index with uncovered columns in the WHERE clause.

2021-03-04 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new d17fab4  PHOENIX-6400 Do not use local index with uncovered columns in 
the WHERE clause.
d17fab4 is described below

commit d17fab43c14bd2ef76564c13205f73029d7e931b
Author: Lars 
AuthorDate: Wed Mar 3 13:01:07 2021 -0800

PHOENIX-6400 Do not use local index with uncovered columns in the WHERE 
clause.
---
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 37 --
 .../org/apache/phoenix/compile/WhereCompiler.java  | 11 +++
 2 files changed, 39 insertions(+), 9 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 7f57c91..495a0a5 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -81,6 +81,26 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 }
 
 @Test
+public void testSelectFromIndexWithAdditionalWhereClause() throws 
Exception {
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+
+Connection conn = getConnection();
+conn.setAutoCommit(true);
+if (isNamespaceMapped) {
+conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + 
schemaName);
+}
+
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT, v2 FLOAT)");
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, 0.01, 1.0)");
+ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) 
FROM "+tableName+" WHERE v1 < 0.1 and v2 < 10.0");
+rs.next();
+assertEquals(1, rs.getInt(1));
+rs.close();
+}
+
+@Test
 public void testDeleteFromLocalIndex() throws Exception {
 String tableName = schemaName + "." + generateUniqueName();
 String indexName = "IDX_" + generateUniqueName();
@@ -212,13 +232,13 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 QueryUtil.getExplainPlan(rs));
 rs.close();
 
-// 4. Longer prefix on the index, use it.
+// 4. Longer prefix on the index.
+// Note: This cannot use the local index, see PHOENIX-6300
 rs = conn.createStatement().executeQuery("EXPLAIN SELECT v2 FROM " + 
tableName + " WHERE pk1 = 3 AND pk2 = 4 AND v1 = 3 AND v3 = 1");
 assertEquals(
 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER "
-+ physicalTableName + " [1,3,4,3]\n"
-+ "SERVER FILTER BY FIRST KEY ONLY AND \"V3\" = 1\n"
-+ "CLIENT MERGE SORT",
++ physicalTableName + " [3,4]\n"
++ "SERVER FILTER BY (V1 = 3.0 AND V3 = 1)",
 QueryUtil.getExplainPlan(rs));
 rs.close();
 }
@@ -354,13 +374,12 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 QueryUtil.getExplainPlan(rs));
 rs.close();
 
-// 10. Use index even when also filtering on non-indexed column
+// 10. Cannot use index even when also filtering on non-indexed 
column, see PHOENIX-6400
 rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + 
tableName + " WHERE v2 = 2 AND v1 = 3");
 assertEquals(
-"CLIENT PARALLEL 1-WAY RANGE SCAN OVER "
-+ indexPhysicalTableName + " [1,2]\n"
-+ "SERVER FILTER BY FIRST KEY ONLY AND \"V1\" 
= 3.0\n"
-+ "CLIENT MERGE SORT",
+"CLIENT PARALLEL 1-WAY FULL SCAN OVER "
++ indexPhysicalTableName + "\n"
++ "SERVER FILTER BY (V2 = 2.0 AND V1 = 3.0)",
 QueryUtil.getExplainPlan(rs));
 rs.close();
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
index 31524d1..4789f0c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
@@ -59,

[phoenix] branch master updated (f7d25b9 -> d161867)

2021-03-04 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from f7d25b9  PHOENIX-6388 Add sampled logging for read repairs
 add d161867  PHOENIX-6400 Do not use local index with uncovered columns in 
the WHERE clause.

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 37 --
 .../org/apache/phoenix/compile/WhereCompiler.java  | 11 +++
 2 files changed, 39 insertions(+), 9 deletions(-)



[phoenix] branch 4.x updated: PHOENIX-5171 SkipScan incorrectly filters composite primary key which the key range contains all values.

2020-09-09 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new fece8e6  PHOENIX-5171 SkipScan incorrectly filters composite primary 
key which the key range contains all values.
fece8e6 is described below

commit fece8e69b9c03c80db7a0801d99e5de31fe15ffa
Author: Lars 
AuthorDate: Wed Sep 9 16:32:03 2020 -0700

PHOENIX-5171 SkipScan incorrectly filters composite primary key which the 
key range contains all values.
---
 .../apache/phoenix/end2end/SkipScanQueryIT.java| 33 
 .../java/org/apache/phoenix/util/ScanUtil.java |  5 +--
 .../apache/phoenix/filter/SkipScanFilterTest.java  | 36 +-
 3 files changed, 69 insertions(+), 5 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
index ccba651..c06b528 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
@@ -712,4 +712,37 @@ public class SkipScanQueryIT extends 
ParallelStatsDisabledIT {
 assertResultSet(rs, new Object[][]{{1,3,2,10},{1,3,5,6}});
 }
 }
+
+@Test
+public void testKeyRangesContainsAllValues() throws Exception {
+String tableName = generateUniqueName();
+String ddl = "CREATE TABLE IF NOT EXISTS " + tableName + "(" +
+ " vdate VARCHAR, " +
+ " tab VARCHAR, " +
+ " dev TINYINT NOT NULL, " +
+ " app VARCHAR, " +
+ " target VARCHAR, " +
+ " channel VARCHAR, " +
+ " one VARCHAR, " +
+ " two VARCHAR, " +
+ " count1 INTEGER, " +
+ " count2 INTEGER, " +
+ " CONSTRAINT PK PRIMARY KEY 
(vdate,tab,dev,app,target,channel,one,two))";
+
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+conn.createStatement().execute(ddl);
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES('2018-02-14','channel_agg',2,null,null,'A004',null,null,2,2)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES('2018-02-14','channel_agg',2,null,null,null,null,null,2,2)");
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery(
+"SELECT * FROM " + tableName +
+" WHERE dev = 2 AND vdate BETWEEN '2018-02-10' AND 
'2019-02-19'" +
+" AND tab = 'channel_agg' AND channel='A004'");
+
+assertTrue(rs.next());
+assertEquals("2018-02-14", rs.getString(1));
+assertFalse(rs.next());
+}
+}
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index 28ea349..be63fae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -406,12 +406,9 @@ public class ScanUtil {
  *for the same reason. However, if the type is variable width
  *continue building the key because null values will be 
filtered
  *since our separator byte will be appended and incremented.
- * 3) if the range includes everything as we cannot add any more 
useful
- *information to the key after that.
  */
 lastUnboundUpper = false;
-if (  range.isUnbound(bound) &&
-( bound == Bound.UPPER || isFixedWidth || range == 
KeyRange.EVERYTHING_RANGE) ){
+if (range.isUnbound(bound) && (bound == Bound.UPPER || 
isFixedWidth)) {
 lastUnboundUpper = (bound == Bound.UPPER);
 break;
 }
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
index ee6e68b..396ad84 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
@@ -137,7 +137,8 @@ public class SkipScanFilterTest extends TestCase {
  
QueryConstants.SEPARATOR_BYTE_ARRAY,
  Bytes.toBytes("1") ), 
 
ByteU

[phoenix] branch master updated: PHOENIX-5171 SkipScan incorrectly filters composite primary key which the key range contains all values.

2020-09-09 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new f5cc0ad  PHOENIX-5171 SkipScan incorrectly filters composite primary 
key which the key range contains all values.
f5cc0ad is described below

commit f5cc0ad49f13283fefccddf1b187da95eecdb423
Author: Lars 
AuthorDate: Wed Sep 9 16:32:03 2020 -0700

PHOENIX-5171 SkipScan incorrectly filters composite primary key which the 
key range contains all values.
---
 .../apache/phoenix/end2end/SkipScanQueryIT.java| 33 
 .../java/org/apache/phoenix/util/ScanUtil.java |  5 +--
 .../apache/phoenix/filter/SkipScanFilterTest.java  | 36 +-
 3 files changed, 69 insertions(+), 5 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
index 64b897dd..6e269da 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
@@ -713,4 +713,37 @@ public class SkipScanQueryIT extends 
ParallelStatsDisabledIT {
 assertResultSet(rs, new Object[][]{{1,3,2,10},{1,3,5,6}});
 }
 }
+
+@Test
+public void testKeyRangesContainsAllValues() throws Exception {
+String tableName = generateUniqueName();
+String ddl = "CREATE TABLE IF NOT EXISTS " + tableName + "(" +
+ " vdate VARCHAR, " +
+ " tab VARCHAR, " +
+ " dev TINYINT NOT NULL, " +
+ " app VARCHAR, " +
+ " target VARCHAR, " +
+ " channel VARCHAR, " +
+ " one VARCHAR, " +
+ " two VARCHAR, " +
+ " count1 INTEGER, " +
+ " count2 INTEGER, " +
+ " CONSTRAINT PK PRIMARY KEY 
(vdate,tab,dev,app,target,channel,one,two))";
+
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+conn.createStatement().execute(ddl);
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES('2018-02-14','channel_agg',2,null,null,'A004',null,null,2,2)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES('2018-02-14','channel_agg',2,null,null,null,null,null,2,2)");
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery(
+"SELECT * FROM " + tableName +
+" WHERE dev = 2 AND vdate BETWEEN '2018-02-10' AND 
'2019-02-19'" +
+" AND tab = 'channel_agg' AND channel='A004'");
+
+assertTrue(rs.next());
+assertEquals("2018-02-14", rs.getString(1));
+assertFalse(rs.next());
+}
+}
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index 0a37411..c892ed2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -406,12 +406,9 @@ public class ScanUtil {
  *for the same reason. However, if the type is variable width
  *continue building the key because null values will be 
filtered
  *since our separator byte will be appended and incremented.
- * 3) if the range includes everything as we cannot add any more 
useful
- *information to the key after that.
  */
 lastUnboundUpper = false;
-if (  range.isUnbound(bound) &&
-( bound == Bound.UPPER || isFixedWidth || range == 
KeyRange.EVERYTHING_RANGE) ){
+if (range.isUnbound(bound) && (bound == Bound.UPPER || 
isFixedWidth)) {
 lastUnboundUpper = (bound == Bound.UPPER);
 break;
 }
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
index 8f78588..641db78 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
@@ -138,7 +138,8 @@ public class SkipScanFilterTest extends TestCase {
  
QueryConstants.SEPARATOR_BYTE_ARRAY,
  Bytes.toBytes("1") )

[phoenix] branch 4.x updated: PHOENIX-6115 Avoid scanning prior row state for uncovered local indexes on immutable tables.

2020-09-01 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 93a85f0  PHOENIX-6115 Avoid scanning prior row state for uncovered 
local indexes on immutable tables.
93a85f0 is described below

commit 93a85f0e67a771408be4840906dbd854415cf207
Author: Lars 
AuthorDate: Tue Sep 1 10:47:34 2020 -0700

PHOENIX-6115 Avoid scanning prior row state for uncovered local indexes on 
immutable tables.
---
 .../hbase/index/covered/data/CachedLocalTable.java| 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/CachedLocalTable.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/CachedLocalTable.java
index 7091178..83bec4b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/CachedLocalTable.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/CachedLocalTable.java
@@ -21,8 +21,10 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.Cell;
@@ -44,13 +46,11 @@ import org.apache.phoenix.schema.types.PVarbinary;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Sets;
 
-import java.util.HashMap;
-
 public class CachedLocalTable implements LocalHBaseState {
 
-private final HashMap> rowKeyPtrToCells;
+private final Map> rowKeyPtrToCells;
 
-private CachedLocalTable(HashMap> 
rowKeyPtrToCells) {
+private CachedLocalTable(Map> 
rowKeyPtrToCells) {
 this.rowKeyPtrToCells = rowKeyPtrToCells;
 }
 
@@ -86,7 +86,7 @@ public class CachedLocalTable implements LocalHBaseState {
 }
 
 @VisibleForTesting
-public static CachedLocalTable build(HashMap> rowKeyPtrToCells) {
+public static CachedLocalTable build(Map> 
rowKeyPtrToCells) {
 return new CachedLocalTable(rowKeyPtrToCells);
 }
 
@@ -111,12 +111,17 @@ public class CachedLocalTable implements LocalHBaseState {
 Collection 
dataTableMutationsWithSameRowKeyAndTimestamp,
 PhoenixIndexMetaData indexMetaData,
 Region region) throws IOException {
-List indexTableMaintainers = 
indexMetaData.getIndexMaintainers();
 Set keys = new 
HashSet(dataTableMutationsWithSameRowKeyAndTimestamp.size());
 for (Mutation mutation : dataTableMutationsWithSameRowKeyAndTimestamp) 
{
+  if (indexMetaData.requiresPriorRowState(mutation)) {
 keys.add(PVarbinary.INSTANCE.getKeyRange(mutation.getRow()));
+  }
+}
+if (keys.isEmpty()) {
+return new CachedLocalTable(Collections.>emptyMap());
 }
 
+List indexTableMaintainers = 
indexMetaData.getIndexMaintainers();
 Set getterColumnReferences = Sets.newHashSet();
 for (IndexMaintainer indexTableMaintainer : indexTableMaintainers) {
 getterColumnReferences.addAll(
@@ -149,7 +154,7 @@ public class CachedLocalTable implements LocalHBaseState {
 scan.setFilter(skipScanFilter);
 }
 
-HashMap> rowKeyPtrToCells =
+Map> rowKeyPtrToCells =
 new HashMap>();
 try (RegionScanner scanner = region.getScanner(scan)) {
 boolean more = true;



[phoenix] branch master updated: PHOENIX-6115 Avoid scanning prior row state for uncovered local indexes on immutable tables.

2020-09-01 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new b0f22e6  PHOENIX-6115 Avoid scanning prior row state for uncovered 
local indexes on immutable tables.
b0f22e6 is described below

commit b0f22e66874676806b12a9abaf4e72570aadfff9
Author: Lars 
AuthorDate: Tue Sep 1 10:10:39 2020 -0700

PHOENIX-6115 Avoid scanning prior row state for uncovered local indexes on 
immutable tables.
---
 .../hbase/index/covered/data/CachedLocalTable.java  | 21 +
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/CachedLocalTable.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/CachedLocalTable.java
index 2fd91f7..c04796d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/CachedLocalTable.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/CachedLocalTable.java
@@ -21,8 +21,10 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.Cell;
@@ -44,14 +46,12 @@ import org.apache.phoenix.schema.types.PVarbinary;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Sets;
 
-import java.util.HashMap;
-
 public class CachedLocalTable implements LocalHBaseState {
 
-private final HashMap<ImmutableBytesPtr, List<Cell>> rowKeyPtrToCells;
+private final Map<ImmutableBytesPtr, List<Cell>> rowKeyPtrToCells;
 private final Region region;
 
-private CachedLocalTable(HashMap<ImmutableBytesPtr, List<Cell>> 
rowKeyPtrToCells, Region region) {
+private CachedLocalTable(Map<ImmutableBytesPtr, List<Cell>> 
rowKeyPtrToCells, Region region) {
 this.rowKeyPtrToCells = rowKeyPtrToCells;
 this.region = region;
 }
@@ -95,7 +95,7 @@ public class CachedLocalTable implements LocalHBaseState {
 }
 
 @VisibleForTesting
-public static CachedLocalTable build(HashMap<ImmutableBytesPtr, List<Cell>> rowKeyPtrToCells) {
+public static CachedLocalTable build(Map<ImmutableBytesPtr, List<Cell>> 
rowKeyPtrToCells) {
 return new CachedLocalTable(rowKeyPtrToCells, null);
 }
 
@@ -105,7 +105,7 @@ public class CachedLocalTable implements LocalHBaseState {
 Region region) throws IOException {
 if(indexMetaData.getReplayWrite() != null)
 {
-return new CachedLocalTable(new 
HashMap<ImmutableBytesPtr, List<Cell>>(), region);
+return new CachedLocalTable(Collections.emptyMap(), region);
 }
 return 
preScanAllRequiredRows(dataTableMutationsWithSameRowKeyAndTimestamp, 
indexMetaData, region);
 }
@@ -124,12 +124,17 @@ public class CachedLocalTable implements LocalHBaseState {
 Collection 
dataTableMutationsWithSameRowKeyAndTimestamp,
 PhoenixIndexMetaData indexMetaData,
 Region region) throws IOException {
-List<IndexMaintainer> indexTableMaintainers = 
indexMetaData.getIndexMaintainers();
 Set<KeyRange> keys = new 
HashSet<KeyRange>(dataTableMutationsWithSameRowKeyAndTimestamp.size());
 for (Mutation mutation : dataTableMutationsWithSameRowKeyAndTimestamp) 
{
+  if (indexMetaData.requiresPriorRowState(mutation)) {
 keys.add(PVarbinary.INSTANCE.getKeyRange(mutation.getRow()));
+  }
+}
+if (keys.isEmpty()) {
+return new CachedLocalTable(Collections.emptyMap(), region);
 }
 
+List<IndexMaintainer> indexTableMaintainers = 
indexMetaData.getIndexMaintainers();
 Set getterColumnReferences = Sets.newHashSet();
 for (IndexMaintainer indexTableMaintainer : indexTableMaintainers) {
 getterColumnReferences.addAll(
@@ -162,7 +167,7 @@ public class CachedLocalTable implements LocalHBaseState {
 scan.setFilter(skipScanFilter);
 }
 
-HashMap<ImmutableBytesPtr, List<Cell>> rowKeyPtrToCells =
+Map<ImmutableBytesPtr, List<Cell>> rowKeyPtrToCells =
 new HashMap<ImmutableBytesPtr, List<Cell>>();
 try (RegionScanner scanner = region.getScanner(scan)) {
 boolean more = true;



[phoenix] branch 4.x updated: PHOENIX-6106 Speed up ConcurrentMutationsExtendedIT.

2020-08-26 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 3c2fa78  PHOENIX-6106 Speed up ConcurrentMutationsExtendedIT.
3c2fa78 is described below

commit 3c2fa78b52d0fd48f111383937e3266a5ec34de0
Author: Lars 
AuthorDate: Wed Aug 26 20:03:56 2020 -0700

PHOENIX-6106 Speed up ConcurrentMutationsExtendedIT.
---
 .../java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
index 1c8f7ad..0a52c66 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
@@ -271,7 +271,7 @@ public class ConcurrentMutationsExtendedIT extends 
ParallelStatsDisabledIT {
 verifyIndexTable(tableName, indexName, conn);
 }
 
-@Test @Repeat(5)
+@Test
 public void testConcurrentUpserts() throws Exception {
 int nThreads = 4;
 final int batchSize = 200;



[phoenix] branch master updated: PHOENIX-6106 Speed up ConcurrentMutationsExtendedIT.

2020-08-26 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 020dd0b  PHOENIX-6106 Speed up ConcurrentMutationsExtendedIT.
020dd0b is described below

commit 020dd0b92b7b860fdd9f8b3d62e36af8de8ec9f7
Author: Lars 
AuthorDate: Wed Aug 26 20:02:57 2020 -0700

PHOENIX-6106 Speed up ConcurrentMutationsExtendedIT.
---
 .../java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
index d35451a..9389d0c 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
@@ -227,7 +227,7 @@ public class ConcurrentMutationsExtendedIT extends 
ParallelStatsDisabledIT {
 verifyIndexTable(tableName, indexName, conn);
 }
 
-@Test @Repeat(5)
+@Test
 public void testConcurrentUpserts() throws Exception {
 int nThreads = 4;
 final int batchSize = 200;



[phoenix] branch 4.x updated: PHOENIX-6101 Avoid duplicate work between local and global indexes.

2020-08-25 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 81d404b  PHOENIX-6101 Avoid duplicate work between local and global 
indexes.
81d404b is described below

commit 81d404b4b02904211d95e86b492c8b52ee1bbcff
Author: Lars 
AuthorDate: Tue Aug 25 12:50:41 2020 -0700

PHOENIX-6101 Avoid duplicate work between local and global indexes.
---
 .../phoenix/hbase/index/IndexRegionObserver.java   | 106 -
 1 file changed, 63 insertions(+), 43 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
index 49b5509..bcf718c 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Increment;
@@ -429,10 +430,20 @@ public class IndexRegionObserver extends 
BaseRegionObserver {
 return context.multiMutationMap.values();
 }
 
-public static void setTimestamp(Mutation m, long ts) throws IOException {
-for (List cells : m.getFamilyCellMap().values()) {
-for (Cell cell : cells) {
-CellUtil.setTimestamp(cell, ts);
+public static void setTimestamps(MiniBatchOperationInProgress 
miniBatchOp, IndexBuildManager builder, long ts) throws IOException {
+for (Integer i = 0; i < miniBatchOp.size(); i++) {
+if (miniBatchOp.getOperationStatus(i) == IGNORE) {
+continue;
+}
+Mutation m = miniBatchOp.getOperation(i);
+// skip this mutation if we aren't enabling indexing
+if (!builder.isEnabled(m)) {
+continue;
+}
+for (List cells : m.getFamilyCellMap().values()) {
+for (Cell cell : cells) {
+CellUtil.setTimestamp(cell, ts);
+}
 }
 }
 }
@@ -502,9 +513,6 @@ public class IndexRegionObserver extends BaseRegionObserver 
{
 if (!this.builder.isEnabled(m)) {
 continue;
 }
-// We update the time stamp of the data table to prevent 
overlapping time stamps (which prevents index
-// inconsistencies as this case isn't handled correctly currently).
-setTimestamp(m, now);
 if (m instanceof Put) {
 ImmutableBytesPtr rowKeyPtr = new 
ImmutableBytesPtr(m.getRow());
 Pair dataRowState = 
context.dataRowStates.get(rowKeyPtr);
@@ -554,13 +562,13 @@ public class IndexRegionObserver extends 
BaseRegionObserver {
  * The index update generation for local indexes uses the existing index 
update generation code (i.e.,
  * the {@link IndexBuilder} implementation).
  */
-private void 
handleLocalIndexUpdates(ObserverContext c,
+private void handleLocalIndexUpdates(TableName table,
  
MiniBatchOperationInProgress miniBatchOp,
  Collection 
pendingMutations,
  PhoenixIndexMetaData indexMetaData) 
throws Throwable {
 ListMultimap> 
indexUpdates = ArrayListMultimap.>create();
 this.builder.getIndexUpdates(indexUpdates, miniBatchOp, 
pendingMutations, indexMetaData);
-byte[] tableName = 
c.getEnvironment().getRegion().getTableDesc().getTableName().getName();
+byte[] tableName = table.getName();
 HTableInterfaceReference hTableInterfaceReference =
 new HTableInterfaceReference(new ImmutableBytesPtr(tableName));
 List> localIndexUpdates = 
indexUpdates.removeAll(hTableInterfaceReference);
@@ -685,10 +693,7 @@ public class IndexRegionObserver extends 
BaseRegionObserver {
  * unverified status. In phase 2, data table mutations are applied. In 
phase 3, the status for an index table row is
  * either set to "verified" or the row is deleted.
  */
-private boolean 
preparePreIndexMutations(ObserverContext c,
-  
MiniBatchOperationInProgress miniBatchOp,
-  BatchMutateContext context,
-  Collection 
pendingMutations,
+private void preparePreIndexMutations(Batc

[phoenix] branch master updated: PHOENIX-6101 Avoid duplicate work between local and global indexes.

2020-08-25 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new c109a61  PHOENIX-6101 Avoid duplicate work between local and global 
indexes.
c109a61 is described below

commit c109a61890fd2ea14a7274808b43298b6e221b11
Author: Lars 
AuthorDate: Tue Aug 25 12:31:18 2020 -0700

PHOENIX-6101 Avoid duplicate work between local and global indexes.
---
 .../phoenix/hbase/index/IndexRegionObserver.java   | 107 -
 1 file changed, 63 insertions(+), 44 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
index 2d0cf51..50e1f68 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Increment;
@@ -443,10 +444,20 @@ public class IndexRegionObserver implements 
RegionObserver, RegionCoprocessor {
 return context.multiMutationMap.values();
 }
 
-public static void setTimestamp(Mutation m, long ts) throws IOException {
-for (List cells : m.getFamilyCellMap().values()) {
-for (Cell cell : cells) {
-CellUtil.setTimestamp(cell, ts);
+public static void setTimestamps(MiniBatchOperationInProgress 
miniBatchOp, IndexBuildManager builder, long ts) throws IOException {
+for (Integer i = 0; i < miniBatchOp.size(); i++) {
+if (miniBatchOp.getOperationStatus(i) == IGNORE) {
+continue;
+}
+Mutation m = miniBatchOp.getOperation(i);
+// skip this mutation if we aren't enabling indexing
+if (!builder.isEnabled(m)) {
+continue;
+}
+for (List cells : m.getFamilyCellMap().values()) {
+for (Cell cell : cells) {
+CellUtil.setTimestamp(cell, ts);
+}
 }
 }
 }
@@ -516,10 +527,6 @@ public class IndexRegionObserver implements 
RegionObserver, RegionCoprocessor {
 if (!this.builder.isEnabled(m)) {
 continue;
 }
-// Unless we're replaying edits to rebuild the index, we update 
the time stamp
-// of the data table to prevent overlapping time stamps (which 
prevents index
-// inconsistencies as this case isn't handled correctly currently).
-setTimestamp(m, now);
 if (m instanceof Put) {
 ImmutableBytesPtr rowKeyPtr = new 
ImmutableBytesPtr(m.getRow());
 Pair dataRowState = 
context.dataRowStates.get(rowKeyPtr);
@@ -569,13 +576,13 @@ public class IndexRegionObserver implements 
RegionObserver, RegionCoprocessor {
  * The index update generation for local indexes uses the existing index 
update generation code (i.e.,
  * the {@link IndexBuilder} implementation).
  */
-private void 
handleLocalIndexUpdates(ObserverContext c,
+private void handleLocalIndexUpdates(TableName table,
  
MiniBatchOperationInProgress miniBatchOp,
  Collection 
pendingMutations,
  PhoenixIndexMetaData indexMetaData) 
throws Throwable {
 ListMultimap> 
indexUpdates = ArrayListMultimap.>create();
 this.builder.getIndexUpdates(indexUpdates, miniBatchOp, 
pendingMutations, indexMetaData);
-byte[] tableName = 
c.getEnvironment().getRegion().getTableDescriptor().getTableName().getName();
+byte[] tableName = table.getName();
 HTableInterfaceReference hTableInterfaceReference =
 new HTableInterfaceReference(new ImmutableBytesPtr(tableName));
 List> localIndexUpdates = 
indexUpdates.removeAll(hTableInterfaceReference);
@@ -702,10 +709,7 @@ public class IndexRegionObserver implements 
RegionObserver, RegionCoprocessor {
  * unverified status. In phase 2, data table mutations are applied. In 
phase 3, the status for an index table row is
  * either set to "verified" or the row is deleted.
  */
-private boolean 
preparePreIndexMutations(ObserverContext c,
-  
MiniBatchOperationInProgress miniBatchOp,
-  Batc

[phoenix] branch 4.x updated: PHOENIX-6097 Improve LOCAL index consistency tests.

2020-08-24 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new c1f3f19  PHOENIX-6097 Improve LOCAL index consistency tests.
c1f3f19 is described below

commit c1f3f194bec0efaa83930aad16dd319b44bb27b0
Author: Lars 
AuthorDate: Mon Aug 24 09:55:58 2020 -0700

PHOENIX-6097 Improve LOCAL index consistency tests.
---
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 61 +-
 1 file changed, 47 insertions(+), 14 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 012bbca..14e85ab 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -112,25 +112,58 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 Connection conn = getConnection();
 conn.setAutoCommit(true);
 
-conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT) SPLIT ON (2000)");
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT) SPLIT ON (4000)");
 conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1)");
-conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(rand() * 4000, rand())");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(rand() * 8000, rand())");
 
-ResultSet rs;
-for (int i=0; i<15; i++) {
-conn.createStatement().execute("UPSERT INTO " + tableName + " 
SELECT rand() * 4000, rand() FROM " + tableName);
+for (int i=0; i<16; i++) {
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
SELECT rand() * 8000, rand() FROM " + tableName);
+assertEquals(getCountViaIndex(conn, tableName, null), 
getCountViaIndex(conn, tableName, indexName));
+}
+}
 
-rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + 
tableName);
-rs.next();
-int indexCount = rs.getInt(1);
-rs.close();
+@Test
+public void testLocalIndexConsistencyWithGlobalMix() throws Exception {
+if (isNamespaceMapped) {
+return;
+}
+String tableName = schemaName + "." + generateUniqueName();
+String localIndexNames[] = {"L_" + generateUniqueName(), "L_" + 
generateUniqueName()};
+String globalIndexNames[] = {"G_" + generateUniqueName(), "G_" + 
generateUniqueName()};
 
-rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ 
COUNT(*) FROM " + tableName);
-rs.next();
-int tableCount = rs.getInt(1);
-rs.close();
+Connection conn = getConnection();
+conn.setAutoCommit(true);
 
-assertEquals(indexCount, tableCount);
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT, v2 FLOAT, v3 FLOAT, v4 FLOAT) SPLIT ON (4000)");
+
+int idx=1;
+for (String indexName : localIndexNames) {
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + 
" ON " + tableName + "(v" + idx++ +")");
+}
+for (String indexName : globalIndexNames) {
+conn.createStatement().execute("CREATE INDEX " + indexName + " ON 
" + tableName + "(v" + idx++ +")");
+}
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(rand() * 8000, rand())");
+
+for (int i=0; i<16; i++) {
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
SELECT rand() * 8000, rand() FROM " + tableName);
+
+int count = getCountViaIndex(conn, tableName, null);
+for (String indexName : localIndexNames) {
+assertEquals(count, getCountViaIndex(conn, tableName, 
indexName));
+}
+
+for (String indexName : globalIndexNames) {
+assertEquals(count, getCountViaIndex(conn, tableName, 
indexName));
+}
+}
+}
+
+private int getCountViaIndex(Connection conn, String tableName, String 
indexName) throws SQLException {
+String hint = indexName == null ? "NO_INDEX" : "INDEX(" + tableName + 
" " + indexName + ")";
+try (ResultSet rs = conn.createStatement().executeQuery("SELECT /*+ " 
+ hint + " */ COUNT(*) FROM " + tableName)) {
+rs.next();
+return rs.getInt(1);
 }
 }
 



[phoenix] branch master updated: PHOENIX-6097 Improve LOCAL index consistency tests.

2020-08-24 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 3113307  PHOENIX-6097 Improve LOCAL index consistency tests.
3113307 is described below

commit 3113307a313c409343255c84f17e766ebdbd1d8a
Author: Lars 
AuthorDate: Mon Aug 24 09:47:20 2020 -0700

PHOENIX-6097 Improve LOCAL index consistency tests.
---
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 61 +-
 1 file changed, 47 insertions(+), 14 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 0965ce1..3d5a323 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -113,25 +113,58 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 Connection conn = getConnection();
 conn.setAutoCommit(true);
 
-conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT) SPLIT ON (2000)");
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT) SPLIT ON (4000)");
 conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1)");
-conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(rand() * 4000, rand())");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(rand() * 8000, rand())");
 
-ResultSet rs;
-for (int i=0; i<15; i++) {
-conn.createStatement().execute("UPSERT INTO " + tableName + " 
SELECT rand() * 4000, rand() FROM " + tableName);
+for (int i=0; i<16; i++) {
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
SELECT rand() * 8000, rand() FROM " + tableName);
+assertEquals(getCountViaIndex(conn, tableName, null), 
getCountViaIndex(conn, tableName, indexName));
+}
+}
 
-rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + 
tableName);
-rs.next();
-int indexCount = rs.getInt(1);
-rs.close();
+@Test
+public void testLocalIndexConsistencyWithGlobalMix() throws Exception {
+if (isNamespaceMapped) {
+return;
+}
+String tableName = schemaName + "." + generateUniqueName();
+String localIndexNames[] = {"L_" + generateUniqueName(), "L_" + 
generateUniqueName()};
+String globalIndexNames[] = {"G_" + generateUniqueName(), "G_" + 
generateUniqueName()};
 
-rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ 
COUNT(*) FROM " + tableName);
-rs.next();
-int tableCount = rs.getInt(1);
-rs.close();
+Connection conn = getConnection();
+conn.setAutoCommit(true);
 
-assertEquals(indexCount, tableCount);
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT, v2 FLOAT, v3 FLOAT, v4 FLOAT) SPLIT ON (4000)");
+
+int idx=1;
+for (String indexName : localIndexNames) {
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + 
" ON " + tableName + "(v" + idx++ +")");
+}
+for (String indexName : globalIndexNames) {
+conn.createStatement().execute("CREATE INDEX " + indexName + " ON 
" + tableName + "(v" + idx++ +")");
+}
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(rand() * 8000, rand())");
+
+for (int i=0; i<16; i++) {
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
SELECT rand() * 8000, rand() FROM " + tableName);
+
+int count = getCountViaIndex(conn, tableName, null);
+for (String indexName : localIndexNames) {
+assertEquals(count, getCountViaIndex(conn, tableName, 
indexName));
+}
+
+for (String indexName : globalIndexNames) {
+assertEquals(count, getCountViaIndex(conn, tableName, 
indexName));
+}
+}
+}
+
+private int getCountViaIndex(Connection conn, String tableName, String 
indexName) throws SQLException {
+String hint = indexName == null ? "NO_INDEX" : "INDEX(" + tableName + 
" " + indexName + ")";
+try (ResultSet rs = conn.createStatement().executeQuery("SELECT /*+ " 
+ hint + " */ COUNT(*) FROM " + tableName)) {
+rs.next();
+return rs.getInt(1);
 }
 }
 



[phoenix] branch 4.x updated: Update jacoco plugin version to 0.8.5.

2020-08-23 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 355d95a  Update jacoco plugin version to 0.8.5.
355d95a is described below

commit 355d95a4762c3ccac1be35659f3c02c385e17b3b
Author: Lars 
AuthorDate: Sun Aug 23 11:42:52 2020 -0700

Update jacoco plugin version to 0.8.5.
---
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pom.xml b/pom.xml
index b19cd3e..b98c498 100644
--- a/pom.xml
+++ b/pom.xml
@@ -137,7 +137,7 @@
 2.9
 
1.9.1
 3.0.0-M3
-0.7.9
+0.8.5
 
 
 8



[phoenix] branch master updated: Update jacoco plugin version to 0.8.5.

2020-08-23 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 3ec4599  Update jacoco plugin version to 0.8.5.
3ec4599 is described below

commit 3ec45999dbd38d689da2d4884bb1054107e55a1b
Author: Lars 
AuthorDate: Sun Aug 23 11:32:39 2020 -0700

Update jacoco plugin version to 0.8.5.
---
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pom.xml b/pom.xml
index 7eda4ed..81bdff7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -132,7 +132,7 @@
 
1.9.1
 3.0.0-M3
 
${antlr.version}
-0.7.9
+0.8.5
 
 
 8



[phoenix] branch 4.x updated: Local indexes get out of sync after changes for global consistent indexes.

2020-08-22 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new daa6816  Local indexes get out of sync after changes for global 
consistent indexes.
daa6816 is described below

commit daa6816dcb3ac035bf8553e6bf2ff8a18e80e6e4
Author: Lars 
AuthorDate: Sat Aug 22 11:55:24 2020 -0700

Local indexes get out of sync after changes for global consistent indexes.
---
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 33 ++
 .../phoenix/hbase/index/IndexRegionObserver.java   | 70 --
 2 files changed, 71 insertions(+), 32 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 481ce1c..012bbca 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -102,6 +102,39 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 }
 
 @Test
+public void testLocalIndexConsistency() throws Exception {
+if (isNamespaceMapped) {
+return;
+}
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+
+Connection conn = getConnection();
+conn.setAutoCommit(true);
+
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT) SPLIT ON (2000)");
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(rand() * 4000, rand())");
+
+ResultSet rs;
+for (int i=0; i<15; i++) {
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
SELECT rand() * 4000, rand() FROM " + tableName);
+
+rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + 
tableName);
+rs.next();
+int indexCount = rs.getInt(1);
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ 
COUNT(*) FROM " + tableName);
+rs.next();
+int tableCount = rs.getInt(1);
+rs.close();
+
+assertEquals(indexCount, tableCount);
+}
+}
+
+@Test
 public void testUseUncoveredLocalIndexWithPrefix() throws Exception {
 String tableName = schemaName + "." + generateUniqueName();
 String indexName = "IDX_" + generateUniqueName();
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
index e24b8e2..49b5509 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
@@ -685,7 +685,7 @@ public class IndexRegionObserver extends BaseRegionObserver 
{
  * unverified status. In phase 2, data table mutations are applied. In 
phase 3, the status for an index table row is
  * either set to "verified" or the row is deleted.
  */
-private void 
preparePreIndexMutations(ObserverContext c,
+private boolean 
preparePreIndexMutations(ObserverContext c,
   
MiniBatchOperationInProgress miniBatchOp,
   BatchMutateContext context,
   Collection 
pendingMutations,
@@ -699,13 +699,6 @@ public class IndexRegionObserver extends 
BaseRegionObserver {
 current = NullSpan.INSTANCE;
 }
 current.addTimelineAnnotation("Built index updates, doing 
preStep");
-// Handle local index updates
-for (IndexMaintainer indexMaintainer : maintainers) {
-if (indexMaintainer.isLocalIndex()) {
-handleLocalIndexUpdates(c, miniBatchOp, pendingMutations, 
indexMetaData);
-break;
-}
-}
 // The rest of this method is for handling global index updates
 context.indexUpdates = 
ArrayListMultimap.>create();
 prepareIndexMutations(context, maintainers, now);
@@ -713,6 +706,9 @@ public class IndexRegionObserver extends BaseRegionObserver 
{
 context.preIndexUpdates = 
ArrayListMultimap.create();
 int updateCount = 0;
 for (IndexMaintainer indexMaintainer : maintainers) {
+if (indexMaintain

[phoenix] branch master updated: Local indexes get out of sync after changes for global consistent indexes.

2020-08-22 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new dbc98ac  Local indexes get out of sync after changes for global 
consistent indexes.
dbc98ac is described below

commit dbc98acd1f09d4d8c360a84f9d126b4e03a73fe0
Author: Lars 
AuthorDate: Sat Aug 22 10:50:53 2020 -0700

Local indexes get out of sync after changes for global consistent indexes.
---
 .../apache/phoenix/end2end/index/LocalIndexIT.java | 33 ++
 .../phoenix/hbase/index/IndexRegionObserver.java   | 70 --
 2 files changed, 71 insertions(+), 32 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 724da6e..0965ce1 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -103,6 +103,39 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 }
 
 @Test
+public void testLocalIndexConsistency() throws Exception {
+if (isNamespaceMapped) {
+return;
+}
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+
+Connection conn = getConnection();
+conn.setAutoCommit(true);
+
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT) SPLIT ON (2000)");
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v1)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(rand() * 4000, rand())");
+
+ResultSet rs;
+for (int i=0; i<15; i++) {
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
SELECT rand() * 4000, rand() FROM " + tableName);
+
+rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + 
tableName);
+rs.next();
+int indexCount = rs.getInt(1);
+rs.close();
+
+rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ 
COUNT(*) FROM " + tableName);
+rs.next();
+int tableCount = rs.getInt(1);
+rs.close();
+
+assertEquals(indexCount, tableCount);
+}
+}
+
+@Test
 public void testUseUncoveredLocalIndexWithPrefix() throws Exception {
 String tableName = schemaName + "." + generateUniqueName();
 String indexName = "IDX_" + generateUniqueName();
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
index bfeadcb..2d0cf51 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
@@ -702,7 +702,7 @@ public class IndexRegionObserver implements RegionObserver, 
RegionCoprocessor {
  * unverified status. In phase 2, data table mutations are applied. In 
phase 3, the status for an index table row is
  * either set to "verified" or the row is deleted.
  */
-private void 
preparePreIndexMutations(ObserverContext c,
+private boolean 
preparePreIndexMutations(ObserverContext c,
   
MiniBatchOperationInProgress miniBatchOp,
   BatchMutateContext context,
   Collection 
pendingMutations,
@@ -716,13 +716,6 @@ public class IndexRegionObserver implements 
RegionObserver, RegionCoprocessor {
 current = NullSpan.INSTANCE;
 }
 current.addTimelineAnnotation("Built index updates, doing 
preStep");
-// Handle local index updates
-for (IndexMaintainer indexMaintainer : maintainers) {
-if (indexMaintainer.isLocalIndex()) {
-handleLocalIndexUpdates(c, miniBatchOp, pendingMutations, 
indexMetaData);
-break;
-}
-}
 // The rest of this method is for handling global index updates
 context.indexUpdates = 
ArrayListMultimap.>create();
 prepareIndexMutations(context, maintainers, now);
@@ -730,6 +723,9 @@ public class IndexRegionObserver implements RegionObserver, 
RegionCoprocessor {
 context.preIndexUpdates = 
ArrayListMultimap.create();
 int updateCount = 0;
 for (IndexM

[phoenix] branch 4.x updated: PHOENIX-6000 Client side DELETEs should use local indexes for filtering.

2020-07-16 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new fd22478  PHOENIX-6000 Client side DELETEs should use local indexes for 
filtering.
fd22478 is described below

commit fd22478d522ce5dcfc43c67696018a360e043d6c
Author: Lars 
AuthorDate: Thu Jul 16 10:27:18 2020 -0700

PHOENIX-6000 Client side DELETEs should use local indexes for filtering.
---
 .../end2end/index/GlobalIndexOptimizationIT.java   | 55 --
 .../org/apache/phoenix/compile/DeleteCompiler.java | 22 +
 2 files changed, 53 insertions(+), 24 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
index 5c2558e..0d0556b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
@@ -50,14 +50,61 @@ public class GlobalIndexOptimizationIT extends 
ParallelStatsDisabledIT {
 conn.close();
 }
 
-private void createIndex(String indexName, String tableName, String 
columns) throws SQLException {
+private void createIndex(String indexName, String tableName, String 
columns, String includes, boolean local) throws SQLException {
 Connection conn = DriverManager.getConnection(getUrl());
-String ddl = "CREATE INDEX " + indexName + " ON " + tableName + " (" + 
columns + ")";
+String ddl = "CREATE " + (local ? "LOCAL " : "") + "INDEX " + 
indexName + " ON " + tableName + " (" + columns + ")" + (includes != null ? " 
INCLUDE (" + includes + ")" : "");
 conn.createStatement().execute(ddl);
 conn.close();
 }
 
 @Test
+public void testIndexDeleteOptimizationWithLocalIndex() throws Exception {
+String dataTableName = generateUniqueName();
+String indexTableName = generateUniqueName();
+createBaseTable(dataTableName, null, null, false);
+// create a local index that only covers k3
+createIndex(indexTableName+"L", dataTableName, "k3", null, true);
+// create a global index covering v1 and k3
+createIndex(indexTableName+"G", dataTableName, "v1", "k3", false);
+
+String query = "DELETE FROM " + dataTableName + " where k3 < 100";
+try (Connection conn1 = DriverManager.getConnection(getUrl())) {
+conn1.createStatement().execute("UPSERT INTO " + dataTableName + " 
values(TO_CHAR(rand()*100),rand()*1,rand()*1,rand()*1,TO_CHAR(rand()*100))");
+for (int i=0; i<16; i++) {
+conn1.createStatement().execute("UPSERT INTO " + dataTableName 
+ " SELECT 
TO_CHAR(rand()*100),rand()*1,rand()*1,rand()*1,TO_CHAR(rand()*100) 
FROM " + dataTableName);
+}
+ResultSet rs = conn1.createStatement().executeQuery("EXPLAIN "+ 
query);
+String expected =
+"DELETE ROWS CLIENT SELECT\n" +
+"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + dataTableName 
+" [1,*] - [1,100]\n" +
+"SERVER FILTER BY FIRST KEY ONLY\n" +
+"CLIENT MERGE SORT";
+String actual = QueryUtil.getExplainPlan(rs);
+assertEquals(expected, actual);
+rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " 
+ dataTableName);
+rs.next();
+int count = rs.getInt(1);
+int deleted = conn1.createStatement().executeUpdate(query);
+int expectedCount = count - deleted;
+
+rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " 
+ dataTableName);
+rs.next();
+count = rs.getInt(1);
+assertEquals(expectedCount, count);
+
+rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " 
+ indexTableName+"L");
+rs.next();
+count = rs.getInt(1);
+assertEquals(expectedCount, count);
+
+rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " 
+ indexTableName+"G");
+rs.next();
+count = rs.getInt(1);
+assertEquals(expectedCount, count);
+}
+}
+
+@Test
 public void testGlobalIndexOptimization() throws Exception {
 String dataTableName = generateUniqueName();
 

[phoenix] branch master updated: PHOENIX-6000 Client side DELETEs should use local indexes for filtering.

2020-07-16 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 02c9d5d  PHOENIX-6000 Client side DELETEs should use local indexes for 
filtering.
02c9d5d is described below

commit 02c9d5d15be727efaba8ce922857b4ba8d6f129b
Author: Lars 
AuthorDate: Thu Jul 16 10:26:31 2020 -0700

PHOENIX-6000 Client side DELETEs should use local indexes for filtering.
---
 .../end2end/index/GlobalIndexOptimizationIT.java   | 55 --
 .../org/apache/phoenix/compile/DeleteCompiler.java | 22 +
 ...eleteCompiler.java => DeleteCompiler.java.orig} |  0
 3 files changed, 53 insertions(+), 24 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
index 5c2558e..0d0556b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
@@ -50,14 +50,61 @@ public class GlobalIndexOptimizationIT extends 
ParallelStatsDisabledIT {
 conn.close();
 }
 
-private void createIndex(String indexName, String tableName, String 
columns) throws SQLException {
+private void createIndex(String indexName, String tableName, String 
columns, String includes, boolean local) throws SQLException {
 Connection conn = DriverManager.getConnection(getUrl());
-String ddl = "CREATE INDEX " + indexName + " ON " + tableName + " (" + 
columns + ")";
+String ddl = "CREATE " + (local ? "LOCAL " : "") + "INDEX " + 
indexName + " ON " + tableName + " (" + columns + ")" + (includes != null ? " 
INCLUDE (" + includes + ")" : "");
 conn.createStatement().execute(ddl);
 conn.close();
 }
 
 @Test
+public void testIndexDeleteOptimizationWithLocalIndex() throws Exception {
+String dataTableName = generateUniqueName();
+String indexTableName = generateUniqueName();
+createBaseTable(dataTableName, null, null, false);
+// create a local index that only covers k3
+createIndex(indexTableName+"L", dataTableName, "k3", null, true);
+// create a global index covering v1 and k3
+createIndex(indexTableName+"G", dataTableName, "v1", "k3", false);
+
+String query = "DELETE FROM " + dataTableName + " where k3 < 100";
+try (Connection conn1 = DriverManager.getConnection(getUrl())) {
+conn1.createStatement().execute("UPSERT INTO " + dataTableName + " 
values(TO_CHAR(rand()*100),rand()*1,rand()*1,rand()*1,TO_CHAR(rand()*100))");
+for (int i=0; i<16; i++) {
+conn1.createStatement().execute("UPSERT INTO " + dataTableName 
+ " SELECT 
TO_CHAR(rand()*100),rand()*1,rand()*1,rand()*1,TO_CHAR(rand()*100) 
FROM " + dataTableName);
+}
+ResultSet rs = conn1.createStatement().executeQuery("EXPLAIN "+ 
query);
+String expected =
+"DELETE ROWS CLIENT SELECT\n" +
+"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + dataTableName 
+" [1,*] - [1,100]\n" +
+"SERVER FILTER BY FIRST KEY ONLY\n" +
+"CLIENT MERGE SORT";
+String actual = QueryUtil.getExplainPlan(rs);
+assertEquals(expected, actual);
+rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " 
+ dataTableName);
+rs.next();
+int count = rs.getInt(1);
+int deleted = conn1.createStatement().executeUpdate(query);
+int expectedCount = count - deleted;
+
+rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " 
+ dataTableName);
+rs.next();
+count = rs.getInt(1);
+assertEquals(expectedCount, count);
+
+rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " 
+ indexTableName+"L");
+rs.next();
+count = rs.getInt(1);
+assertEquals(expectedCount, count);
+
+rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " 
+ indexTableName+"G");
+rs.next();
+count = rs.getInt(1);
+assertEquals(expectedCount, count);
+}
+}
+
+@Test
 public void testGlobalIndexOptimization() throws Exception {

[phoenix] branch master updated: PHOENIX-5096 Local index region pruning is not working as expected.

2019-12-24 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 356f4cd  PHOENIX-5096 Local index region pruning is not working as 
expected.
356f4cd is described below

commit 356f4cd7d43bccb9538a5a2b94863b1c52cd9aad
Author: Lars Hofhansl 
AuthorDate: Tue Dec 24 06:26:39 2019 -0800

PHOENIX-5096 Local index region pruning is not working as expected.
---
 .../phoenix/iterate/BaseResultIterators.java   |  9 
 .../apache/phoenix/compile/QueryCompilerTest.java  | 60 ++
 2 files changed, 69 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 8fd368a..12a6b3a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -1023,6 +1023,15 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 endKey = regionBoundaries.get(regionIndex);
 }
 if (isLocalIndex) {
+if (dataPlan != null && 
dataPlan.getTableRef().getTable().getType() != PTableType.INDEX) { // Sanity 
check
+ScanRanges dataScanRanges = 
dataPlan.getContext().getScanRanges();
+// we can skip a region completely for local indexes 
if the data plan does not intersect
+if 
(!dataScanRanges.intersectRegion(regionInfo.getStartKey(), 
regionInfo.getEndKey(), false)) {
+currentKeyBytes = endKey;
+regionIndex++;
+continue;
+}
+}
 // Only attempt further pruning if the prefix range is 
using
 // a skip scan since we've already pruned the range of 
regions
 // based on the start/stop key.
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index 31369be..c4c47e7 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -4877,6 +4877,66 @@ public class QueryCompilerTest extends 
BaseConnectionlessQueryTest {
 }
 
 @Test
+public void testLocalIndexRegionPruning() throws SQLException {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+conn.createStatement().execute("CREATE TABLE T (\n" + 
+"A CHAR(1) NOT NULL,\n" + 
+"B CHAR(1) NOT NULL,\n" + 
+"C CHAR(1) NOT NULL,\n" + 
+"D CHAR(1),\n" + 
+"CONSTRAINT PK PRIMARY KEY (\n" + 
+"A,\n" + 
+"B,\n" + 
+"C\n" + 
+")\n" + 
+") SPLIT ON ('A','C','E','G','I')");
+
+conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(D)");
+
+// un-pruned, need to scan all six regions
+String query = "SELECT * FROM T WHERE D = 'C'";
+PhoenixStatement statement = 
conn.createStatement().unwrap(PhoenixStatement.class);
+QueryPlan plan = statement.optimizeQuery(query);
+assertEquals("IDX", 
plan.getContext().getCurrentTable().getTable().getName().getString());
+plan.iterator();
+assertEquals(6, plan.getScans().size());
+
+// fixing first part of the key, can limit scanning to two regions
+query = "SELECT * FROM T WHERE A = 'A' AND D = 'C'";
+statement = conn.createStatement().unwrap(PhoenixStatement.class);
+plan = statement.optimizeQuery(query);
+assertEquals("IDX", 
plan.getContext().getCurrentTable().getTable().getName().getString());
+plan.iterator();
+assertEquals(2, plan.getScans().size());
+
+// same with skipscan filter
+query = "SELECT * FROM T WHERE A IN ('A', 'C') AND D = 'C'";
+statement = conn.createStatement().unwrap(PhoenixStatement.class);
+plan = statement.optimizeQuery(query);
+assertEquals("IDX", 
plan.getContext().getCurrentTable().getTable().getName().getString());

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5096 Local index region pruning is not working as expected.

2019-12-24 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 214b487  PHOENIX-5096 Local index region pruning is not working as 
expected.
214b487 is described below

commit 214b487cbe87ab8571b4a310953799e25568f1d5
Author: Lars Hofhansl 
AuthorDate: Tue Dec 24 06:26:39 2019 -0800

PHOENIX-5096 Local index region pruning is not working as expected.
---
 .../phoenix/iterate/BaseResultIterators.java   |  9 
 .../apache/phoenix/compile/QueryCompilerTest.java  | 60 ++
 2 files changed, 69 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 45b4d4d..2dcc88b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -1023,6 +1023,15 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 endKey = regionBoundaries.get(regionIndex);
 }
 if (isLocalIndex) {
+if (dataPlan != null && 
dataPlan.getTableRef().getTable().getType() != PTableType.INDEX) { // Sanity 
check
+ScanRanges dataScanRanges = 
dataPlan.getContext().getScanRanges();
+// we can skip a region completely for local indexes 
if the data plan does not intersect
+if 
(!dataScanRanges.intersectRegion(regionInfo.getStartKey(), 
regionInfo.getEndKey(), false)) {
+currentKeyBytes = endKey;
+regionIndex++;
+continue;
+}
+}
 // Only attempt further pruning if the prefix range is 
using
 // a skip scan since we've already pruned the range of 
regions
 // based on the start/stop key.
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index 3dca5b6..f72c3f6 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -4877,6 +4877,66 @@ public class QueryCompilerTest extends 
BaseConnectionlessQueryTest {
 }
 
 @Test
+public void testLocalIndexRegionPruning() throws SQLException {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+conn.createStatement().execute("CREATE TABLE T (\n" + 
+"A CHAR(1) NOT NULL,\n" + 
+"B CHAR(1) NOT NULL,\n" + 
+"C CHAR(1) NOT NULL,\n" + 
+"D CHAR(1),\n" + 
+"CONSTRAINT PK PRIMARY KEY (\n" + 
+"A,\n" + 
+"B,\n" + 
+"C\n" + 
+")\n" + 
+") SPLIT ON ('A','C','E','G','I')");
+
+conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(D)");
+
+// un-pruned, need to scan all six regions
+String query = "SELECT * FROM T WHERE D = 'C'";
+PhoenixStatement statement = 
conn.createStatement().unwrap(PhoenixStatement.class);
+QueryPlan plan = statement.optimizeQuery(query);
+assertEquals("IDX", 
plan.getContext().getCurrentTable().getTable().getName().getString());
+plan.iterator();
+assertEquals(6, plan.getScans().size());
+
+// fixing first part of the key, can limit scanning to two regions
+query = "SELECT * FROM T WHERE A = 'A' AND D = 'C'";
+statement = conn.createStatement().unwrap(PhoenixStatement.class);
+plan = statement.optimizeQuery(query);
+assertEquals("IDX", 
plan.getContext().getCurrentTable().getTable().getName().getString());
+plan.iterator();
+assertEquals(2, plan.getScans().size());
+
+// same with skipscan filter
+query = "SELECT * FROM T WHERE A IN ('A', 'C') AND D = 'C'";
+statement = conn.createStatement().unwrap(PhoenixStatement.class);
+plan = statement.optimizeQuery(query);
+assertEquals("IDX", 
plan.getContext().getCurrentTable().getTab

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5096 Local index region pruning is not working as expected.

2019-12-24 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 2f9ad87  PHOENIX-5096 Local index region pruning is not working as 
expected.
2f9ad87 is described below

commit 2f9ad87e1c7d3ad879af490f2546a596516d2ccb
Author: Lars Hofhansl 
AuthorDate: Tue Dec 24 06:26:39 2019 -0800

PHOENIX-5096 Local index region pruning is not working as expected.
---
 .../phoenix/iterate/BaseResultIterators.java   |  9 
 .../apache/phoenix/compile/QueryCompilerTest.java  | 60 ++
 2 files changed, 69 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 45b4d4d..2dcc88b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -1023,6 +1023,15 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 endKey = regionBoundaries.get(regionIndex);
 }
 if (isLocalIndex) {
+if (dataPlan != null && 
dataPlan.getTableRef().getTable().getType() != PTableType.INDEX) { // Sanity 
check
+ScanRanges dataScanRanges = 
dataPlan.getContext().getScanRanges();
+// we can skip a region completely for local indexes 
if the data plan does not intersect
+if 
(!dataScanRanges.intersectRegion(regionInfo.getStartKey(), 
regionInfo.getEndKey(), false)) {
+currentKeyBytes = endKey;
+regionIndex++;
+continue;
+}
+}
 // Only attempt further pruning if the prefix range is 
using
 // a skip scan since we've already pruned the range of 
regions
 // based on the start/stop key.
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index e6337fa..b49aaf8 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -4870,6 +4870,66 @@ public class QueryCompilerTest extends 
BaseConnectionlessQueryTest {
 }
 
 @Test
+public void testLocalIndexRegionPruning() throws SQLException {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+conn.createStatement().execute("CREATE TABLE T (\n" + 
+"A CHAR(1) NOT NULL,\n" + 
+"B CHAR(1) NOT NULL,\n" + 
+"C CHAR(1) NOT NULL,\n" + 
+"D CHAR(1),\n" + 
+"CONSTRAINT PK PRIMARY KEY (\n" + 
+"A,\n" + 
+"B,\n" + 
+"C\n" + 
+")\n" + 
+") SPLIT ON ('A','C','E','G','I')");
+
+conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(D)");
+
+// un-pruned, need to scan all six regions
+String query = "SELECT * FROM T WHERE D = 'C'";
+PhoenixStatement statement = 
conn.createStatement().unwrap(PhoenixStatement.class);
+QueryPlan plan = statement.optimizeQuery(query);
+assertEquals("IDX", 
plan.getContext().getCurrentTable().getTable().getName().getString());
+plan.iterator();
+assertEquals(6, plan.getScans().size());
+
+// fixing first part of the key, can limit scanning to two regions
+query = "SELECT * FROM T WHERE A = 'A' AND D = 'C'";
+statement = conn.createStatement().unwrap(PhoenixStatement.class);
+plan = statement.optimizeQuery(query);
+assertEquals("IDX", 
plan.getContext().getCurrentTable().getTable().getName().getString());
+plan.iterator();
+assertEquals(2, plan.getScans().size());
+
+// same with skipscan filter
+query = "SELECT * FROM T WHERE A IN ('A', 'C') AND D = 'C'";
+statement = conn.createStatement().unwrap(PhoenixStatement.class);
+plan = statement.optimizeQuery(query);
+assertEquals("IDX", 
plan.getContext().getCurrentTable().getTab

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5096 Local index region pruning is not working as expected.

2019-12-24 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 1620c7d  PHOENIX-5096 Local index region pruning is not working as 
expected.
1620c7d is described below

commit 1620c7d7f6b8ce8ccf38e27e6bff1182ec6f7985
Author: Lars Hofhansl 
AuthorDate: Tue Dec 24 06:26:39 2019 -0800

PHOENIX-5096 Local index region pruning is not working as expected.
---
 .../phoenix/iterate/BaseResultIterators.java   |  9 
 .../apache/phoenix/compile/QueryCompilerTest.java  | 60 ++
 2 files changed, 69 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 45b4d4d..2dcc88b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -1023,6 +1023,15 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 endKey = regionBoundaries.get(regionIndex);
 }
 if (isLocalIndex) {
+if (dataPlan != null && 
dataPlan.getTableRef().getTable().getType() != PTableType.INDEX) { // Sanity 
check
+ScanRanges dataScanRanges = 
dataPlan.getContext().getScanRanges();
+// we can skip a region completely for local indexes 
if the data plan does not intersect
+if 
(!dataScanRanges.intersectRegion(regionInfo.getStartKey(), 
regionInfo.getEndKey(), false)) {
+currentKeyBytes = endKey;
+regionIndex++;
+continue;
+}
+}
 // Only attempt further pruning if the prefix range is 
using
 // a skip scan since we've already pruned the range of 
regions
 // based on the start/stop key.
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index e6337fa..b49aaf8 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -4870,6 +4870,66 @@ public class QueryCompilerTest extends 
BaseConnectionlessQueryTest {
 }
 
 @Test
+public void testLocalIndexRegionPruning() throws SQLException {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+conn.createStatement().execute("CREATE TABLE T (\n" + 
+"A CHAR(1) NOT NULL,\n" + 
+"B CHAR(1) NOT NULL,\n" + 
+"C CHAR(1) NOT NULL,\n" + 
+"D CHAR(1),\n" + 
+"CONSTRAINT PK PRIMARY KEY (\n" + 
+"A,\n" + 
+"B,\n" + 
+"C\n" + 
+")\n" + 
+") SPLIT ON ('A','C','E','G','I')");
+
+conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(D)");
+
+// un-pruned, need to scan all six regions
+String query = "SELECT * FROM T WHERE D = 'C'";
+PhoenixStatement statement = 
conn.createStatement().unwrap(PhoenixStatement.class);
+QueryPlan plan = statement.optimizeQuery(query);
+assertEquals("IDX", 
plan.getContext().getCurrentTable().getTable().getName().getString());
+plan.iterator();
+assertEquals(6, plan.getScans().size());
+
+// fixing first part of the key, can limit scanning to two regions
+query = "SELECT * FROM T WHERE A = 'A' AND D = 'C'";
+statement = conn.createStatement().unwrap(PhoenixStatement.class);
+plan = statement.optimizeQuery(query);
+assertEquals("IDX", 
plan.getContext().getCurrentTable().getTable().getName().getString());
+plan.iterator();
+assertEquals(2, plan.getScans().size());
+
+// same with skipscan filter
+query = "SELECT * FROM T WHERE A IN ('A', 'C') AND D = 'C'";
+statement = conn.createStatement().unwrap(PhoenixStatement.class);
+plan = statement.optimizeQuery(query);
+assertEquals("IDX", 
plan.getContext().getCurrentTable().getTab

[phoenix] branch master updated: PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.

2019-12-22 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 25cc076  PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.
25cc076 is described below

commit 25cc0764bc7c69511df61e777f851b963f4798fb
Author: Lars Hofhansl 
AuthorDate: Sun Dec 22 04:27:50 2019 -0800

PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.

This reverts commit 4ff6da4a00941052e5c81d79ed21b0aca9e49c44.
---
 .../end2end/ParameterizedIndexUpgradeToolIT.java   | 38 ++
 1 file changed, 17 insertions(+), 21 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
index 5f9dc9a..b320446 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
@@ -310,44 +310,40 @@ public class ParameterizedIndexUpgradeToolIT extends 
BaseTest {
 }
 
 @Test
-public void testToolWithIncorrectTables() throws Exception {
+public void testDryRunAndFailures() throws Exception {
 validate(true);
+
+// test with incorrect table
 iut.setInputTables("TEST3.TABLE_NOT_PRESENT");
 iut.prepareToolSetup();
 
 int status = iut.executeTool();
 Assert.assertEquals(-1, status);
 validate(true);
-}
 
-@Test
-public void testToolWithNoIndex() throws Exception {
-if (!upgrade || isNamespaceEnabled) {
-return;
-}
-conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id bigint 
NOT NULL "
-+ "PRIMARY KEY, a.name varchar, sal bigint, address varchar)" 
+ tableDDLOptions);
-iut.setInputTables("TEST.NEW_TABLE");
-iut.prepareToolSetup();
-int status = iut.executeTool();
-Assert.assertEquals(0, status);
-conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
-}
-
-@Test
-public void testToolWithInputFileParameter() throws Exception {
+// test with input file parameter
 BufferedWriter writer = new BufferedWriter(new FileWriter(new 
File(INPUT_FILE)));
 writer.write(INPUT_LIST);
 writer.close();
 
-validate(true);
-
 iut.setInputTables(null);
 iut.setInputFile(INPUT_FILE);
 iut.prepareToolSetup();
-iut.executeTool();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
 
 validate(true);
+
+// test table without index
+if (upgrade && !isNamespaceEnabled) {
+conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id 
bigint NOT NULL "
++ "PRIMARY KEY, a.name varchar, sal bigint, address 
varchar)" + tableDDLOptions);
+iut.setInputTables("TEST.NEW_TABLE");
+iut.prepareToolSetup();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
+conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
+}
 }
 
 @After



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.

2019-12-22 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 71fdb77  PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.
71fdb77 is described below

commit 71fdb77d0143eb9db0f04da96576830a6e0e8e02
Author: Lars Hofhansl 
AuthorDate: Sun Dec 22 04:27:16 2019 -0800

PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.

This reverts commit 0c5f0d6d308b3c9be9537e9a0915f0ee19f2271c.
---
 .../end2end/ParameterizedIndexUpgradeToolIT.java   | 38 ++
 1 file changed, 17 insertions(+), 21 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
index 16f99e3..5a2cef9 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
@@ -310,44 +310,40 @@ public class ParameterizedIndexUpgradeToolIT extends 
BaseTest {
 }
 
 @Test
-public void testToolWithIncorrectTables() throws Exception {
+public void testDryRunAndFailures() throws Exception {
 validate(true);
+
+// test with incorrect table
 iut.setInputTables("TEST3.TABLE_NOT_PRESENT");
 iut.prepareToolSetup();
 
 int status = iut.executeTool();
 Assert.assertEquals(-1, status);
 validate(true);
-}
 
-@Test
-public void testToolWithNoIndex() throws Exception {
-if (!upgrade || isNamespaceEnabled) {
-return;
-}
-conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id bigint 
NOT NULL "
-+ "PRIMARY KEY, a.name varchar, sal bigint, address varchar)" 
+ tableDDLOptions);
-iut.setInputTables("TEST.NEW_TABLE");
-iut.prepareToolSetup();
-int status = iut.executeTool();
-Assert.assertEquals(0, status);
-conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
-}
-
-@Test
-public void testToolWithInputFileParameter() throws Exception {
+// test with input file parameter
 BufferedWriter writer = new BufferedWriter(new FileWriter(new 
File(INPUT_FILE)));
 writer.write(INPUT_LIST);
 writer.close();
 
-validate(true);
-
 iut.setInputTables(null);
 iut.setInputFile(INPUT_FILE);
 iut.prepareToolSetup();
-iut.executeTool();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
 
 validate(true);
+
+// test table without index
+if (upgrade && !isNamespaceEnabled) {
+conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id 
bigint NOT NULL "
++ "PRIMARY KEY, a.name varchar, sal bigint, address 
varchar)" + tableDDLOptions);
+iut.setInputTables("TEST.NEW_TABLE");
+iut.prepareToolSetup();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
+conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
+}
 }
 
 @After



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.

2019-12-22 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 80c912e  PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.
80c912e is described below

commit 80c912e1f88ca6bd39e7c2f3dcee1e2d089535dc
Author: Lars Hofhansl 
AuthorDate: Sun Dec 22 04:26:41 2019 -0800

PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.

This reverts commit 95fd8e0d1abbb763f59e30d569b9c002f7253ada.
---
 .../end2end/ParameterizedIndexUpgradeToolIT.java   | 38 ++
 1 file changed, 17 insertions(+), 21 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
index 16f99e3..5a2cef9 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
@@ -310,44 +310,40 @@ public class ParameterizedIndexUpgradeToolIT extends 
BaseTest {
 }
 
 @Test
-public void testToolWithIncorrectTables() throws Exception {
+public void testDryRunAndFailures() throws Exception {
 validate(true);
+
+// test with incorrect table
 iut.setInputTables("TEST3.TABLE_NOT_PRESENT");
 iut.prepareToolSetup();
 
 int status = iut.executeTool();
 Assert.assertEquals(-1, status);
 validate(true);
-}
 
-@Test
-public void testToolWithNoIndex() throws Exception {
-if (!upgrade || isNamespaceEnabled) {
-return;
-}
-conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id bigint 
NOT NULL "
-+ "PRIMARY KEY, a.name varchar, sal bigint, address varchar)" 
+ tableDDLOptions);
-iut.setInputTables("TEST.NEW_TABLE");
-iut.prepareToolSetup();
-int status = iut.executeTool();
-Assert.assertEquals(0, status);
-conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
-}
-
-@Test
-public void testToolWithInputFileParameter() throws Exception {
+// test with input file parameter
 BufferedWriter writer = new BufferedWriter(new FileWriter(new 
File(INPUT_FILE)));
 writer.write(INPUT_LIST);
 writer.close();
 
-validate(true);
-
 iut.setInputTables(null);
 iut.setInputFile(INPUT_FILE);
 iut.prepareToolSetup();
-iut.executeTool();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
 
 validate(true);
+
+// test table without index
+if (upgrade && !isNamespaceEnabled) {
+conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id 
bigint NOT NULL "
++ "PRIMARY KEY, a.name varchar, sal bigint, address 
varchar)" + tableDDLOptions);
+iut.setInputTables("TEST.NEW_TABLE");
+iut.prepareToolSetup();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
+conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
+}
 }
 
 @After



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.

2019-12-22 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 9a948d0  PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.
9a948d0 is described below

commit 9a948d0cd654045d68f102c9d3e0f527c7b77b5d
Author: Lars Hofhansl 
AuthorDate: Sun Dec 22 04:25:45 2019 -0800

PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.

This reverts commit 02d5935cbbd75ad2491413042e5010bb76ed57c8.
---
 .../end2end/ParameterizedIndexUpgradeToolIT.java   | 38 ++
 1 file changed, 17 insertions(+), 21 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
index 16f99e3..5a2cef9 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
@@ -310,44 +310,40 @@ public class ParameterizedIndexUpgradeToolIT extends 
BaseTest {
 }
 
 @Test
-public void testToolWithIncorrectTables() throws Exception {
+public void testDryRunAndFailures() throws Exception {
 validate(true);
+
+// test with incorrect table
 iut.setInputTables("TEST3.TABLE_NOT_PRESENT");
 iut.prepareToolSetup();
 
 int status = iut.executeTool();
 Assert.assertEquals(-1, status);
 validate(true);
-}
 
-@Test
-public void testToolWithNoIndex() throws Exception {
-if (!upgrade || isNamespaceEnabled) {
-return;
-}
-conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id bigint 
NOT NULL "
-+ "PRIMARY KEY, a.name varchar, sal bigint, address varchar)" 
+ tableDDLOptions);
-iut.setInputTables("TEST.NEW_TABLE");
-iut.prepareToolSetup();
-int status = iut.executeTool();
-Assert.assertEquals(0, status);
-conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
-}
-
-@Test
-public void testToolWithInputFileParameter() throws Exception {
+// test with input file parameter
 BufferedWriter writer = new BufferedWriter(new FileWriter(new 
File(INPUT_FILE)));
 writer.write(INPUT_LIST);
 writer.close();
 
-validate(true);
-
 iut.setInputTables(null);
 iut.setInputFile(INPUT_FILE);
 iut.prepareToolSetup();
-iut.executeTool();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
 
 validate(true);
+
+// test table without index
+if (upgrade && !isNamespaceEnabled) {
+conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id 
bigint NOT NULL "
++ "PRIMARY KEY, a.name varchar, sal bigint, address 
varchar)" + tableDDLOptions);
+iut.setInputTables("TEST.NEW_TABLE");
+iut.prepareToolSetup();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
+conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
+}
 }
 
 @After



[phoenix] branch master updated: Revert "PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT."

2019-12-21 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 4ff6da4  Revert "PHOENIX-5616 Speed up 
ParameterizedIndexUpgradeToolIT."
4ff6da4 is described below

commit 4ff6da4a00941052e5c81d79ed21b0aca9e49c44
Author: Lars Hofhansl 
AuthorDate: Sat Dec 21 07:47:40 2019 -0800

Revert "PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT."

This reverts commit 9cd873492d4047d20c09259bedcd6df91348a08a.
---
 .../end2end/ParameterizedIndexUpgradeToolIT.java   | 38 --
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
index b320446..5f9dc9a 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
@@ -310,40 +310,44 @@ public class ParameterizedIndexUpgradeToolIT extends 
BaseTest {
 }
 
 @Test
-public void testDryRunAndFailures() throws Exception {
+public void testToolWithIncorrectTables() throws Exception {
 validate(true);
-
-// test with incorrect table
 iut.setInputTables("TEST3.TABLE_NOT_PRESENT");
 iut.prepareToolSetup();
 
 int status = iut.executeTool();
 Assert.assertEquals(-1, status);
 validate(true);
+}
 
-// test with input file parameter
+@Test
+public void testToolWithNoIndex() throws Exception {
+if (!upgrade || isNamespaceEnabled) {
+return;
+}
+conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id bigint 
NOT NULL "
++ "PRIMARY KEY, a.name varchar, sal bigint, address varchar)" 
+ tableDDLOptions);
+iut.setInputTables("TEST.NEW_TABLE");
+iut.prepareToolSetup();
+int status = iut.executeTool();
+Assert.assertEquals(0, status);
+conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
+}
+
+@Test
+public void testToolWithInputFileParameter() throws Exception {
 BufferedWriter writer = new BufferedWriter(new FileWriter(new 
File(INPUT_FILE)));
 writer.write(INPUT_LIST);
 writer.close();
 
+validate(true);
+
 iut.setInputTables(null);
 iut.setInputFile(INPUT_FILE);
 iut.prepareToolSetup();
-status = iut.executeTool();
-Assert.assertEquals(0, status);
+iut.executeTool();
 
 validate(true);
-
-// test table without index
-if (upgrade && !isNamespaceEnabled) {
-conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id 
bigint NOT NULL "
-+ "PRIMARY KEY, a.name varchar, sal bigint, address 
varchar)" + tableDDLOptions);
-iut.setInputTables("TEST.NEW_TABLE");
-iut.prepareToolSetup();
-status = iut.executeTool();
-Assert.assertEquals(0, status);
-conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
-}
 }
 
 @After



[phoenix] branch 4.x-HBase-1.3 updated: Revert "PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT."

2019-12-21 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 0c5f0d6  Revert "PHOENIX-5616 Speed up 
ParameterizedIndexUpgradeToolIT."
0c5f0d6 is described below

commit 0c5f0d6d308b3c9be9537e9a0915f0ee19f2271c
Author: Lars Hofhansl 
AuthorDate: Sat Dec 21 07:47:03 2019 -0800

Revert "PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT."

This reverts commit 53c0089a29a33124b0a0be4c5315995ace2c70fd.
---
 .../end2end/ParameterizedIndexUpgradeToolIT.java   | 38 --
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
index 5a2cef9..16f99e3 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
@@ -310,40 +310,44 @@ public class ParameterizedIndexUpgradeToolIT extends 
BaseTest {
 }
 
 @Test
-public void testDryRunAndFailures() throws Exception {
+public void testToolWithIncorrectTables() throws Exception {
 validate(true);
-
-// test with incorrect table
 iut.setInputTables("TEST3.TABLE_NOT_PRESENT");
 iut.prepareToolSetup();
 
 int status = iut.executeTool();
 Assert.assertEquals(-1, status);
 validate(true);
+}
 
-// test with input file parameter
+@Test
+public void testToolWithNoIndex() throws Exception {
+if (!upgrade || isNamespaceEnabled) {
+return;
+}
+conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id bigint 
NOT NULL "
++ "PRIMARY KEY, a.name varchar, sal bigint, address varchar)" 
+ tableDDLOptions);
+iut.setInputTables("TEST.NEW_TABLE");
+iut.prepareToolSetup();
+int status = iut.executeTool();
+Assert.assertEquals(0, status);
+conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
+}
+
+@Test
+public void testToolWithInputFileParameter() throws Exception {
 BufferedWriter writer = new BufferedWriter(new FileWriter(new 
File(INPUT_FILE)));
 writer.write(INPUT_LIST);
 writer.close();
 
+validate(true);
+
 iut.setInputTables(null);
 iut.setInputFile(INPUT_FILE);
 iut.prepareToolSetup();
-status = iut.executeTool();
-Assert.assertEquals(0, status);
+iut.executeTool();
 
 validate(true);
-
-// test table without index
-if (upgrade && !isNamespaceEnabled) {
-conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id 
bigint NOT NULL "
-+ "PRIMARY KEY, a.name varchar, sal bigint, address 
varchar)" + tableDDLOptions);
-iut.setInputTables("TEST.NEW_TABLE");
-iut.prepareToolSetup();
-status = iut.executeTool();
-Assert.assertEquals(0, status);
-conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
-}
 }
 
 @After



[phoenix] branch 4.x-HBase-1.4 updated: Revert "PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT."

2019-12-21 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 95fd8e0  Revert "PHOENIX-5616 Speed up 
ParameterizedIndexUpgradeToolIT."
95fd8e0 is described below

commit 95fd8e0d1abbb763f59e30d569b9c002f7253ada
Author: Lars Hofhansl 
AuthorDate: Sat Dec 21 07:46:32 2019 -0800

Revert "PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT."

This reverts commit adc58977c9e1069345c82f838b39083fa3fb6e4a.
---
 .../end2end/ParameterizedIndexUpgradeToolIT.java   | 38 --
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
index 5a2cef9..16f99e3 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
@@ -310,40 +310,44 @@ public class ParameterizedIndexUpgradeToolIT extends 
BaseTest {
 }
 
 @Test
-public void testDryRunAndFailures() throws Exception {
+public void testToolWithIncorrectTables() throws Exception {
 validate(true);
-
-// test with incorrect table
 iut.setInputTables("TEST3.TABLE_NOT_PRESENT");
 iut.prepareToolSetup();
 
 int status = iut.executeTool();
 Assert.assertEquals(-1, status);
 validate(true);
+}
 
-// test with input file parameter
+@Test
+public void testToolWithNoIndex() throws Exception {
+if (!upgrade || isNamespaceEnabled) {
+return;
+}
+conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id bigint 
NOT NULL "
++ "PRIMARY KEY, a.name varchar, sal bigint, address varchar)" 
+ tableDDLOptions);
+iut.setInputTables("TEST.NEW_TABLE");
+iut.prepareToolSetup();
+int status = iut.executeTool();
+Assert.assertEquals(0, status);
+conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
+}
+
+@Test
+public void testToolWithInputFileParameter() throws Exception {
 BufferedWriter writer = new BufferedWriter(new FileWriter(new 
File(INPUT_FILE)));
 writer.write(INPUT_LIST);
 writer.close();
 
+validate(true);
+
 iut.setInputTables(null);
 iut.setInputFile(INPUT_FILE);
 iut.prepareToolSetup();
-status = iut.executeTool();
-Assert.assertEquals(0, status);
+iut.executeTool();
 
 validate(true);
-
-// test table without index
-if (upgrade && !isNamespaceEnabled) {
-conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id 
bigint NOT NULL "
-+ "PRIMARY KEY, a.name varchar, sal bigint, address 
varchar)" + tableDDLOptions);
-iut.setInputTables("TEST.NEW_TABLE");
-iut.prepareToolSetup();
-status = iut.executeTool();
-Assert.assertEquals(0, status);
-conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
-}
 }
 
 @After



[phoenix] branch 4.x-HBase-1.5 updated: Revert "PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT."

2019-12-21 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 02d5935  Revert "PHOENIX-5616 Speed up 
ParameterizedIndexUpgradeToolIT."
02d5935 is described below

commit 02d5935cbbd75ad2491413042e5010bb76ed57c8
Author: Lars Hofhansl 
AuthorDate: Sat Dec 21 07:45:50 2019 -0800

Revert "PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT."

This reverts commit 3d8b3f042ba9357ef6e1e047156839aa5513f05e.
---
 .../end2end/ParameterizedIndexUpgradeToolIT.java   | 38 --
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
index 5a2cef9..16f99e3 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
@@ -310,40 +310,44 @@ public class ParameterizedIndexUpgradeToolIT extends 
BaseTest {
 }
 
 @Test
-public void testDryRunAndFailures() throws Exception {
+public void testToolWithIncorrectTables() throws Exception {
 validate(true);
-
-// test with incorrect table
 iut.setInputTables("TEST3.TABLE_NOT_PRESENT");
 iut.prepareToolSetup();
 
 int status = iut.executeTool();
 Assert.assertEquals(-1, status);
 validate(true);
+}
 
-// test with input file parameter
+@Test
+public void testToolWithNoIndex() throws Exception {
+if (!upgrade || isNamespaceEnabled) {
+return;
+}
+conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id bigint 
NOT NULL "
++ "PRIMARY KEY, a.name varchar, sal bigint, address varchar)" 
+ tableDDLOptions);
+iut.setInputTables("TEST.NEW_TABLE");
+iut.prepareToolSetup();
+int status = iut.executeTool();
+Assert.assertEquals(0, status);
+conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
+}
+
+@Test
+public void testToolWithInputFileParameter() throws Exception {
 BufferedWriter writer = new BufferedWriter(new FileWriter(new 
File(INPUT_FILE)));
 writer.write(INPUT_LIST);
 writer.close();
 
+validate(true);
+
 iut.setInputTables(null);
 iut.setInputFile(INPUT_FILE);
 iut.prepareToolSetup();
-status = iut.executeTool();
-Assert.assertEquals(0, status);
+iut.executeTool();
 
 validate(true);
-
-// test table without index
-if (upgrade && !isNamespaceEnabled) {
-conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id 
bigint NOT NULL "
-+ "PRIMARY KEY, a.name varchar, sal bigint, address 
varchar)" + tableDDLOptions);
-iut.setInputTables("TEST.NEW_TABLE");
-iut.prepareToolSetup();
-status = iut.executeTool();
-Assert.assertEquals(0, status);
-conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
-}
 }
 
 @After



[phoenix] branch master updated: PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.

2019-12-19 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 9cd8734  PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.
9cd8734 is described below

commit 9cd873492d4047d20c09259bedcd6df91348a08a
Author: Lars Hofhansl 
AuthorDate: Thu Dec 19 01:02:05 2019 -0800

PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.
---
 .../end2end/ParameterizedIndexUpgradeToolIT.java   | 38 ++
 1 file changed, 17 insertions(+), 21 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
index 5f9dc9a..b320446 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
@@ -310,44 +310,40 @@ public class ParameterizedIndexUpgradeToolIT extends 
BaseTest {
 }
 
 @Test
-public void testToolWithIncorrectTables() throws Exception {
+public void testDryRunAndFailures() throws Exception {
 validate(true);
+
+// test with incorrect table
 iut.setInputTables("TEST3.TABLE_NOT_PRESENT");
 iut.prepareToolSetup();
 
 int status = iut.executeTool();
 Assert.assertEquals(-1, status);
 validate(true);
-}
 
-@Test
-public void testToolWithNoIndex() throws Exception {
-if (!upgrade || isNamespaceEnabled) {
-return;
-}
-conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id bigint 
NOT NULL "
-+ "PRIMARY KEY, a.name varchar, sal bigint, address varchar)" 
+ tableDDLOptions);
-iut.setInputTables("TEST.NEW_TABLE");
-iut.prepareToolSetup();
-int status = iut.executeTool();
-Assert.assertEquals(0, status);
-conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
-}
-
-@Test
-public void testToolWithInputFileParameter() throws Exception {
+// test with input file parameter
 BufferedWriter writer = new BufferedWriter(new FileWriter(new 
File(INPUT_FILE)));
 writer.write(INPUT_LIST);
 writer.close();
 
-validate(true);
-
 iut.setInputTables(null);
 iut.setInputFile(INPUT_FILE);
 iut.prepareToolSetup();
-iut.executeTool();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
 
 validate(true);
+
+// test table without index
+if (upgrade && !isNamespaceEnabled) {
+conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id 
bigint NOT NULL "
++ "PRIMARY KEY, a.name varchar, sal bigint, address 
varchar)" + tableDDLOptions);
+iut.setInputTables("TEST.NEW_TABLE");
+iut.prepareToolSetup();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
+conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
+}
 }
 
 @After



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.

2019-12-19 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 53c0089  PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.
53c0089 is described below

commit 53c0089a29a33124b0a0be4c5315995ace2c70fd
Author: Lars Hofhansl 
AuthorDate: Thu Dec 19 01:02:05 2019 -0800

PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.
---
 .../end2end/ParameterizedIndexUpgradeToolIT.java   | 38 ++
 1 file changed, 17 insertions(+), 21 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
index 16f99e3..5a2cef9 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
@@ -310,44 +310,40 @@ public class ParameterizedIndexUpgradeToolIT extends 
BaseTest {
 }
 
 @Test
-public void testToolWithIncorrectTables() throws Exception {
+public void testDryRunAndFailures() throws Exception {
 validate(true);
+
+// test with incorrect table
 iut.setInputTables("TEST3.TABLE_NOT_PRESENT");
 iut.prepareToolSetup();
 
 int status = iut.executeTool();
 Assert.assertEquals(-1, status);
 validate(true);
-}
 
-@Test
-public void testToolWithNoIndex() throws Exception {
-if (!upgrade || isNamespaceEnabled) {
-return;
-}
-conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id bigint 
NOT NULL "
-+ "PRIMARY KEY, a.name varchar, sal bigint, address varchar)" 
+ tableDDLOptions);
-iut.setInputTables("TEST.NEW_TABLE");
-iut.prepareToolSetup();
-int status = iut.executeTool();
-Assert.assertEquals(0, status);
-conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
-}
-
-@Test
-public void testToolWithInputFileParameter() throws Exception {
+// test with input file parameter
 BufferedWriter writer = new BufferedWriter(new FileWriter(new 
File(INPUT_FILE)));
 writer.write(INPUT_LIST);
 writer.close();
 
-validate(true);
-
 iut.setInputTables(null);
 iut.setInputFile(INPUT_FILE);
 iut.prepareToolSetup();
-iut.executeTool();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
 
 validate(true);
+
+// test table without index
+if (upgrade && !isNamespaceEnabled) {
+conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id 
bigint NOT NULL "
++ "PRIMARY KEY, a.name varchar, sal bigint, address 
varchar)" + tableDDLOptions);
+iut.setInputTables("TEST.NEW_TABLE");
+iut.prepareToolSetup();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
+conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
+}
 }
 
 @After



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.

2019-12-19 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 3d8b3f0  PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.
3d8b3f0 is described below

commit 3d8b3f042ba9357ef6e1e047156839aa5513f05e
Author: Lars Hofhansl 
AuthorDate: Thu Dec 19 01:02:05 2019 -0800

PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.
---
 .../end2end/ParameterizedIndexUpgradeToolIT.java   | 38 ++
 1 file changed, 17 insertions(+), 21 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
index 16f99e3..5a2cef9 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
@@ -310,44 +310,40 @@ public class ParameterizedIndexUpgradeToolIT extends 
BaseTest {
 }
 
 @Test
-public void testToolWithIncorrectTables() throws Exception {
+public void testDryRunAndFailures() throws Exception {
 validate(true);
+
+// test with incorrect table
 iut.setInputTables("TEST3.TABLE_NOT_PRESENT");
 iut.prepareToolSetup();
 
 int status = iut.executeTool();
 Assert.assertEquals(-1, status);
 validate(true);
-}
 
-@Test
-public void testToolWithNoIndex() throws Exception {
-if (!upgrade || isNamespaceEnabled) {
-return;
-}
-conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id bigint 
NOT NULL "
-+ "PRIMARY KEY, a.name varchar, sal bigint, address varchar)" 
+ tableDDLOptions);
-iut.setInputTables("TEST.NEW_TABLE");
-iut.prepareToolSetup();
-int status = iut.executeTool();
-Assert.assertEquals(0, status);
-conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
-}
-
-@Test
-public void testToolWithInputFileParameter() throws Exception {
+// test with input file parameter
 BufferedWriter writer = new BufferedWriter(new FileWriter(new 
File(INPUT_FILE)));
 writer.write(INPUT_LIST);
 writer.close();
 
-validate(true);
-
 iut.setInputTables(null);
 iut.setInputFile(INPUT_FILE);
 iut.prepareToolSetup();
-iut.executeTool();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
 
 validate(true);
+
+// test table without index
+if (upgrade && !isNamespaceEnabled) {
+conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id 
bigint NOT NULL "
++ "PRIMARY KEY, a.name varchar, sal bigint, address 
varchar)" + tableDDLOptions);
+iut.setInputTables("TEST.NEW_TABLE");
+iut.prepareToolSetup();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
+conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
+}
 }
 
 @After



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.

2019-12-19 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new adc5897  PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.
adc5897 is described below

commit adc58977c9e1069345c82f838b39083fa3fb6e4a
Author: Lars Hofhansl 
AuthorDate: Thu Dec 19 01:02:05 2019 -0800

PHOENIX-5616 Speed up ParameterizedIndexUpgradeToolIT.
---
 .../end2end/ParameterizedIndexUpgradeToolIT.java   | 38 ++
 1 file changed, 17 insertions(+), 21 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
index 16f99e3..5a2cef9 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParameterizedIndexUpgradeToolIT.java
@@ -310,44 +310,40 @@ public class ParameterizedIndexUpgradeToolIT extends 
BaseTest {
 }
 
 @Test
-public void testToolWithIncorrectTables() throws Exception {
+public void testDryRunAndFailures() throws Exception {
 validate(true);
+
+// test with incorrect table
 iut.setInputTables("TEST3.TABLE_NOT_PRESENT");
 iut.prepareToolSetup();
 
 int status = iut.executeTool();
 Assert.assertEquals(-1, status);
 validate(true);
-}
 
-@Test
-public void testToolWithNoIndex() throws Exception {
-if (!upgrade || isNamespaceEnabled) {
-return;
-}
-conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id bigint 
NOT NULL "
-+ "PRIMARY KEY, a.name varchar, sal bigint, address varchar)" 
+ tableDDLOptions);
-iut.setInputTables("TEST.NEW_TABLE");
-iut.prepareToolSetup();
-int status = iut.executeTool();
-Assert.assertEquals(0, status);
-conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
-}
-
-@Test
-public void testToolWithInputFileParameter() throws Exception {
+// test with input file parameter
 BufferedWriter writer = new BufferedWriter(new FileWriter(new 
File(INPUT_FILE)));
 writer.write(INPUT_LIST);
 writer.close();
 
-validate(true);
-
 iut.setInputTables(null);
 iut.setInputFile(INPUT_FILE);
 iut.prepareToolSetup();
-iut.executeTool();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
 
 validate(true);
+
+// test table without index
+if (upgrade && !isNamespaceEnabled) {
+conn.createStatement().execute("CREATE TABLE TEST.NEW_TABLE (id 
bigint NOT NULL "
++ "PRIMARY KEY, a.name varchar, sal bigint, address 
varchar)" + tableDDLOptions);
+iut.setInputTables("TEST.NEW_TABLE");
+iut.prepareToolSetup();
+status = iut.executeTool();
+Assert.assertEquals(0, status);
+conn.createStatement().execute("DROP TABLE TEST.NEW_TABLE");
+}
 }
 
 @After



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5617 Allow using the server side JDBC client in Phoenix Sandbox.

2019-12-19 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 26c7680  PHOENIX-5617 Allow using the server side JDBC client in 
Phoenix Sandbox.
26c7680 is described below

commit 26c768063ac5253d083820cdd43c5faa384ea459
Author: Lars Hofhansl 
AuthorDate: Thu Dec 19 00:55:04 2019 -0800

PHOENIX-5617 Allow using the server side JDBC client in Phoenix Sandbox.
---
 phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java 
b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
index ec4e920..102e97c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
@@ -18,10 +18,12 @@
 package org.apache.phoenix;
 
 import com.google.common.collect.ImmutableMap;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,7 +39,9 @@ public class Sandbox {
 public static void main(String[] args) throws Exception {
 System.out.println("Starting Phoenix sandbox");
 Configuration conf = HBaseConfiguration.create();
-BaseTest.setUpConfigForMiniCluster(conf, new 
ReadOnlyProps(ImmutableMap.of()));
+// unset test=true parameter
+BaseTest.setUpConfigForMiniCluster(conf, new ReadOnlyProps(
+ImmutableMap. 
of(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, "")));
 
 final HBaseTestingUtility testUtil = new HBaseTestingUtility(conf);
 testUtil.startMiniCluster();



[phoenix] branch master updated: PHOENIX-5617 Allow using the server side JDBC client in Phoenix Sandbox.

2019-12-19 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 834d79c  PHOENIX-5617 Allow using the server side JDBC client in 
Phoenix Sandbox.
834d79c is described below

commit 834d79c0e57665d7e0ae89bc0d5723c0846fbc3b
Author: Lars Hofhansl 
AuthorDate: Thu Dec 19 00:55:04 2019 -0800

PHOENIX-5617 Allow using the server side JDBC client in Phoenix Sandbox.
---
 phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java 
b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
index ec4e920..102e97c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
@@ -18,10 +18,12 @@
 package org.apache.phoenix;
 
 import com.google.common.collect.ImmutableMap;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,7 +39,9 @@ public class Sandbox {
 public static void main(String[] args) throws Exception {
 System.out.println("Starting Phoenix sandbox");
 Configuration conf = HBaseConfiguration.create();
-BaseTest.setUpConfigForMiniCluster(conf, new 
ReadOnlyProps(ImmutableMap.of()));
+// unset test=true parameter
+BaseTest.setUpConfigForMiniCluster(conf, new ReadOnlyProps(
+ImmutableMap.<String, String>of(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, "")));
 
 final HBaseTestingUtility testUtil = new HBaseTestingUtility(conf);
 testUtil.startMiniCluster();



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5617 Allow using the server side JDBC client in Phoenix Sandbox.

2019-12-19 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 8f4d9b2  PHOENIX-5617 Allow using the server side JDBC client in 
Phoenix Sandbox.
8f4d9b2 is described below

commit 8f4d9b2d75e68f318f0bca5bec17556e0d9bde77
Author: Lars Hofhansl 
AuthorDate: Thu Dec 19 00:55:04 2019 -0800

PHOENIX-5617 Allow using the server side JDBC client in Phoenix Sandbox.
---
 phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java 
b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
index ec4e920..102e97c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
@@ -18,10 +18,12 @@
 package org.apache.phoenix;
 
 import com.google.common.collect.ImmutableMap;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,7 +39,9 @@ public class Sandbox {
 public static void main(String[] args) throws Exception {
 System.out.println("Starting Phoenix sandbox");
 Configuration conf = HBaseConfiguration.create();
-BaseTest.setUpConfigForMiniCluster(conf, new 
ReadOnlyProps(ImmutableMap.of()));
+// unset test=true parameter
+BaseTest.setUpConfigForMiniCluster(conf, new ReadOnlyProps(
+ImmutableMap.<String, String>of(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, "")));
 
 final HBaseTestingUtility testUtil = new HBaseTestingUtility(conf);
 testUtil.startMiniCluster();



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5617 Allow using the server side JDBC client in Phoenix Sandbox.

2019-12-19 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 6e20b4f  PHOENIX-5617 Allow using the server side JDBC client in 
Phoenix Sandbox.
6e20b4f is described below

commit 6e20b4f9a4fa4d68c60cce104ba15d58ec5ebe9d
Author: Lars Hofhansl 
AuthorDate: Thu Dec 19 00:55:04 2019 -0800

PHOENIX-5617 Allow using the server side JDBC client in Phoenix Sandbox.
---
 phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java 
b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
index ec4e920..102e97c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/Sandbox.java
@@ -18,10 +18,12 @@
 package org.apache.phoenix;
 
 import com.google.common.collect.ImmutableMap;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,7 +39,9 @@ public class Sandbox {
 public static void main(String[] args) throws Exception {
 System.out.println("Starting Phoenix sandbox");
 Configuration conf = HBaseConfiguration.create();
-BaseTest.setUpConfigForMiniCluster(conf, new 
ReadOnlyProps(ImmutableMap.of()));
+// unset test=true parameter
+BaseTest.setUpConfigForMiniCluster(conf, new ReadOnlyProps(
+ImmutableMap.<String, String>of(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, "")));
 
 final HBaseTestingUtility testUtil = new HBaseTestingUtility(conf);
 testUtil.startMiniCluster();



[phoenix] branch master updated: PHOENIX-5610 Dropping a view or column with a 4.14 client raises an ArrayIndexOutOfBoundsException on 4.15 server.

2019-12-12 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new e91b614  PHOENIX-5610 Dropping a view or column with a 4.14 client 
raises an ArrayIndexOutOfBoundsException on 4.15 server.
e91b614 is described below

commit e91b614d7e6f2867f3ac9930aff66311f779dded
Author: Lars Hofhansl 
AuthorDate: Thu Dec 12 09:10:19 2019 -0800

PHOENIX-5610 Dropping a view or column with a 4.14 client raises an 
ArrayIndexOutOfBoundsException on 4.15 server.
---
 .../apache/phoenix/coprocessor/MetaDataEndpointImpl.java | 16 
 1 file changed, 16 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 86afe8d..fb626c4 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2490,6 +2490,14 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements RegionCopr
 }
 }
 
+if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && tableType == 
PTableType.VIEW) {
+try (PhoenixConnection connection = 
QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class))
 {
+PTable pTable = PhoenixRuntime.getTableNoCache(connection, 
table.getParentName().getString());
+table = 
ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable);
+} catch (ClassNotFoundException e) {
+throw new IOException(e);
+}
+}
 return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS,
 EnvironmentEdgeManager.currentTimeMillis(), table, 
tableNamesToDelete, sharedTablesToDelete);
 }
@@ -2731,6 +2739,14 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements RegionCopr
 return result;
 } else {
 table = buildTable(key, cacheKey, region, 
HConstants.LATEST_TIMESTAMP, clientVersion);
+if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && type 
== PTableType.VIEW) {
+try (PhoenixConnection connection = 
QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class))
 {
+PTable pTable = 
PhoenixRuntime.getTableNoCache(connection, table.getParentName().getString());
+table = 
ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable);
+} catch (ClassNotFoundException e) {
+throw new IOException(e);
+}
+}
 return new 
MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, currentTime, table,
 tableNamesToDelete, sharedTablesToDelete);
 }



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5610 Dropping a view or column with a 4.14 client raises an ArrayIndexOutOfBoundsException on 4.15 server.

2019-12-12 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new fb35da9  PHOENIX-5610 Dropping a view or column with a 4.14 client 
raises an ArrayIndexOutOfBoundsException on 4.15 server.
fb35da9 is described below

commit fb35da9a2ee898122c0814ddfa8de742ead9567b
Author: Lars Hofhansl 
AuthorDate: Thu Dec 12 09:10:19 2019 -0800

PHOENIX-5610 Dropping a view or column with a 4.14 client raises an 
ArrayIndexOutOfBoundsException on 4.15 server.
---
 .../apache/phoenix/coprocessor/MetaDataEndpointImpl.java | 16 
 1 file changed, 16 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index dab77b6..12e2f12 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2473,6 +2473,14 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 }
 }
 
+if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && tableType == 
PTableType.VIEW) {
+try (PhoenixConnection connection = 
QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class))
 {
+PTable pTable = PhoenixRuntime.getTableNoCache(connection, 
table.getParentName().getString());
+table = 
ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable);
+} catch (ClassNotFoundException e) {
+throw new IOException(e);
+}
+}
 return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS,
 EnvironmentEdgeManager.currentTimeMillis(), table, 
tableNamesToDelete, sharedTablesToDelete);
 }
@@ -2713,6 +2721,14 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 return result;
 } else {
 table = buildTable(key, cacheKey, region, 
HConstants.LATEST_TIMESTAMP, clientVersion);
+if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && type 
== PTableType.VIEW) {
+try (PhoenixConnection connection = 
QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class))
 {
+PTable pTable = 
PhoenixRuntime.getTableNoCache(connection, table.getParentName().getString());
+table = 
ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable);
+} catch (ClassNotFoundException e) {
+throw new IOException(e);
+}
+}
 return new 
MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, currentTime, table,
 tableNamesToDelete, sharedTablesToDelete);
 }



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5610 Dropping a view or column with a 4.14 client raises an ArrayIndexOutOfBoundsException on 4.15 server.

2019-12-12 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 7cc5f06  PHOENIX-5610 Dropping a view or column with a 4.14 client 
raises an ArrayIndexOutOfBoundsException on 4.15 server.
7cc5f06 is described below

commit 7cc5f062df25e819fcba8d30942926be42cc50e2
Author: Lars Hofhansl 
AuthorDate: Thu Dec 12 09:10:19 2019 -0800

PHOENIX-5610 Dropping a view or column with a 4.14 client raises an 
ArrayIndexOutOfBoundsException on 4.15 server.
---
 .../apache/phoenix/coprocessor/MetaDataEndpointImpl.java | 16 
 1 file changed, 16 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index dab77b6..12e2f12 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2473,6 +2473,14 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 }
 }
 
+if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && tableType == 
PTableType.VIEW) {
+try (PhoenixConnection connection = 
QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class))
 {
+PTable pTable = PhoenixRuntime.getTableNoCache(connection, 
table.getParentName().getString());
+table = 
ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable);
+} catch (ClassNotFoundException e) {
+throw new IOException(e);
+}
+}
 return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS,
 EnvironmentEdgeManager.currentTimeMillis(), table, 
tableNamesToDelete, sharedTablesToDelete);
 }
@@ -2713,6 +2721,14 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 return result;
 } else {
 table = buildTable(key, cacheKey, region, 
HConstants.LATEST_TIMESTAMP, clientVersion);
+if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && type 
== PTableType.VIEW) {
+try (PhoenixConnection connection = 
QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class))
 {
+PTable pTable = 
PhoenixRuntime.getTableNoCache(connection, table.getParentName().getString());
+table = 
ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable);
+} catch (ClassNotFoundException e) {
+throw new IOException(e);
+}
+}
 return new 
MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, currentTime, table,
 tableNamesToDelete, sharedTablesToDelete);
 }



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5610 Dropping a view or column with a 4.14 client raises an ArrayIndexOutOfBoundsException on 4.15 server.

2019-12-12 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 5e41585  PHOENIX-5610 Dropping a view or column with a 4.14 client 
raises an ArrayIndexOutOfBoundsException on 4.15 server.
5e41585 is described below

commit 5e4158536292ea83389efda89b9e2de9eb3a70ea
Author: Lars Hofhansl 
AuthorDate: Thu Dec 12 09:10:19 2019 -0800

PHOENIX-5610 Dropping a view or column with a 4.14 client raises an 
ArrayIndexOutOfBoundsException on 4.15 server.
---
 .../apache/phoenix/coprocessor/MetaDataEndpointImpl.java | 16 
 1 file changed, 16 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index dab77b6..12e2f12 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2473,6 +2473,14 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 }
 }
 
+if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && tableType == 
PTableType.VIEW) {
+try (PhoenixConnection connection = 
QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class))
 {
+PTable pTable = PhoenixRuntime.getTableNoCache(connection, 
table.getParentName().getString());
+table = 
ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable);
+} catch (ClassNotFoundException e) {
+throw new IOException(e);
+}
+}
 return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS,
 EnvironmentEdgeManager.currentTimeMillis(), table, 
tableNamesToDelete, sharedTablesToDelete);
 }
@@ -2713,6 +2721,14 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 return result;
 } else {
 table = buildTable(key, cacheKey, region, 
HConstants.LATEST_TIMESTAMP, clientVersion);
+if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && type 
== PTableType.VIEW) {
+try (PhoenixConnection connection = 
QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class))
 {
+PTable pTable = 
PhoenixRuntime.getTableNoCache(connection, table.getParentName().getString());
+table = 
ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable);
+} catch (ClassNotFoundException e) {
+throw new IOException(e);
+}
+}
 return new 
MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, currentTime, table,
 tableNamesToDelete, sharedTablesToDelete);
 }



[phoenix] branch master updated: PHOENIX-5584 Older clients don't get correct view metadata when a 4.15 client creates a view.

2019-11-25 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new cdabf29  PHOENIX-5584 Older clients don't get correct view metadata 
when a 4.15 client creates a view.
cdabf29 is described below

commit cdabf29aa7440c4a8b8c85b81542121ffcb7baac
Author: Lars Hofhansl 
AuthorDate: Mon Nov 25 16:20:58 2019 -0800

PHOENIX-5584 Older clients don't get correct view metadata when a 4.15 
client creates a view.
---
 .../apache/phoenix/coprocessor/MetaDataEndpointImpl.java| 13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 0be2383..c3739d4 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -603,6 +603,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol 
implements RegionCopr
 getCoprocessorHost().preGetTable(Bytes.toString(tenantId), 
SchemaUtil.getTableName(schemaName, tableName),
 TableName.valueOf(table.getPhysicalName().getBytes()));
 
+if (request.getClientVersion() < MIN_SPLITTABLE_SYSTEM_CATALOG
+&& table.getType() == PTableType.VIEW
+&& table.getViewType() != ViewType.MAPPED) {
+try (PhoenixConnection connection = 
QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class))
 {
+PTable pTable = PhoenixRuntime.getTableNoCache(connection, 
table.getParentName().getString());
+table = 
ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable);
+}
+}
 
builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
 builder.setMutationTime(currentTime);
 if (blockWriteRebuildIndex) {
@@ -2823,14 +2831,13 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements RegionCopr
 }
 
 /**
- * Looks up the table locally if its present on this region, or else makes 
an rpc call
- * to look up the region using PhoenixRuntime.getTable
+ * Looks up the table locally if it's present on this region.
  */
 private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] 
tableName,
   long clientTimeStamp, RowLock rowLock, int 
clientVersion) throws IOException, SQLException {
 Region region = env.getRegion();
 final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, 
tableName);
-// if this region doesn't contain the metadata rows look up the table 
by using PhoenixRuntime.getTable
+// if this region doesn't contain the metadata rows then fail
 if (!region.getRegionInfo().containsRow(key)) {
 throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_ERROR)
 .setSchemaName(Bytes.toString(schemaName))



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5584 Older clients don't get correct view metadata when a 4.15 client creates a view.

2019-11-25 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 5f7acf4  PHOENIX-5584 Older clients don't get correct view metadata 
when a 4.15 client creates a view.
5f7acf4 is described below

commit 5f7acf46bf01e8e1e714c6a7556d794cc9c4c7cf
Author: Lars Hofhansl 
AuthorDate: Mon Nov 25 16:20:58 2019 -0800

PHOENIX-5584 Older clients don't get correct view metadata when a 4.15 
client creates a view.
---
 .../apache/phoenix/coprocessor/MetaDataEndpointImpl.java| 13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 9fc6020..21c0823 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -601,6 +601,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol 
implements Coprocesso
 getCoprocessorHost().preGetTable(Bytes.toString(tenantId), 
SchemaUtil.getTableName(schemaName, tableName),
 TableName.valueOf(table.getPhysicalName().getBytes()));
 
+if (request.getClientVersion() < MIN_SPLITTABLE_SYSTEM_CATALOG
+&& table.getType() == PTableType.VIEW
+&& table.getViewType() != ViewType.MAPPED) {
+try (PhoenixConnection connection = 
QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class))
 {
+PTable pTable = PhoenixRuntime.getTableNoCache(connection, 
table.getParentName().getString());
+table = 
ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable);
+}
+}
 
builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
 builder.setMutationTime(currentTime);
 if (blockWriteRebuildIndex) {
@@ -2806,14 +2814,13 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 }
 
 /**
- * Looks up the table locally if its present on this region, or else makes 
an rpc call
- * to look up the region using PhoenixRuntime.getTable
+ * Looks up the table locally if it's present on this region.
  */
 private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] 
tableName,
   long clientTimeStamp, RowLock rowLock, int 
clientVersion) throws IOException, SQLException {
 Region region = env.getRegion();
 final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, 
tableName);
-// if this region doesn't contain the metadata rows look up the table 
by using PhoenixRuntime.getTable
+// if this region doesn't contain the metadata rows then fail
 if (!region.getRegionInfo().containsRow(key)) {
 throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_ERROR)
 .setSchemaName(Bytes.toString(schemaName))



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5584 Older clients don't get correct view metadata when a 4.15 client creates a view.

2019-11-25 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 35e19bd  PHOENIX-5584 Older clients don't get correct view metadata 
when a 4.15 client creates a view.
35e19bd is described below

commit 35e19bde32613bca31b09c48e8047b979136a951
Author: Lars Hofhansl 
AuthorDate: Mon Nov 25 16:20:58 2019 -0800

PHOENIX-5584 Older clients don't get correct view metadata when a 4.15 
client creates a view.
---
 .../apache/phoenix/coprocessor/MetaDataEndpointImpl.java| 13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 9fc6020..21c0823 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -601,6 +601,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol 
implements Coprocesso
 getCoprocessorHost().preGetTable(Bytes.toString(tenantId), 
SchemaUtil.getTableName(schemaName, tableName),
 TableName.valueOf(table.getPhysicalName().getBytes()));
 
+if (request.getClientVersion() < MIN_SPLITTABLE_SYSTEM_CATALOG
+&& table.getType() == PTableType.VIEW
+&& table.getViewType() != ViewType.MAPPED) {
+try (PhoenixConnection connection = 
QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class))
 {
+PTable pTable = PhoenixRuntime.getTableNoCache(connection, 
table.getParentName().getString());
+table = 
ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable);
+}
+}
 
builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
 builder.setMutationTime(currentTime);
 if (blockWriteRebuildIndex) {
@@ -2806,14 +2814,13 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 }
 
 /**
- * Looks up the table locally if its present on this region, or else makes 
an rpc call
- * to look up the region using PhoenixRuntime.getTable
+ * Looks up the table locally if it's present on this region.
  */
 private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] 
tableName,
   long clientTimeStamp, RowLock rowLock, int 
clientVersion) throws IOException, SQLException {
 Region region = env.getRegion();
 final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, 
tableName);
-// if this region doesn't contain the metadata rows look up the table 
by using PhoenixRuntime.getTable
+// if this region doesn't contain the metadata rows then fail
 if (!region.getRegionInfo().containsRow(key)) {
 throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_ERROR)
 .setSchemaName(Bytes.toString(schemaName))



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5584 Older clients don't get correct view metadata when a 4.15 client creates a view.

2019-11-25 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 8828b98  PHOENIX-5584 Older clients don't get correct view metadata 
when a 4.15 client creates a view.
8828b98 is described below

commit 8828b98c75bb7c6df1fb2d287beb44815b0a9909
Author: Lars Hofhansl 
AuthorDate: Mon Nov 25 16:20:58 2019 -0800

PHOENIX-5584 Older clients don't get correct view metadata when a 4.15 
client creates a view.
---
 .../apache/phoenix/coprocessor/MetaDataEndpointImpl.java| 13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 9fc6020..21c0823 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -601,6 +601,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol 
implements Coprocesso
 getCoprocessorHost().preGetTable(Bytes.toString(tenantId), 
SchemaUtil.getTableName(schemaName, tableName),
 TableName.valueOf(table.getPhysicalName().getBytes()));
 
+if (request.getClientVersion() < MIN_SPLITTABLE_SYSTEM_CATALOG
+&& table.getType() == PTableType.VIEW
+&& table.getViewType() != ViewType.MAPPED) {
+try (PhoenixConnection connection = 
QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class))
 {
+PTable pTable = PhoenixRuntime.getTableNoCache(connection, 
table.getParentName().getString());
+table = 
ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable);
+}
+}
 
builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
 builder.setMutationTime(currentTime);
 if (blockWriteRebuildIndex) {
@@ -2806,14 +2814,13 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 }
 
 /**
- * Looks up the table locally if its present on this region, or else makes 
an rpc call
- * to look up the region using PhoenixRuntime.getTable
+ * Looks up the table locally if it's present on this region.
  */
 private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] 
tableName,
   long clientTimeStamp, RowLock rowLock, int 
clientVersion) throws IOException, SQLException {
 Region region = env.getRegion();
 final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, 
tableName);
-// if this region doesn't contain the metadata rows look up the table 
by using PhoenixRuntime.getTable
+// if this region doesn't contain the metadata rows then fail
 if (!region.getRegionInfo().containsRow(key)) {
 throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_ERROR)
 .setSchemaName(Bytes.toString(schemaName))



[phoenix] branch master updated: PHOENIX-5559 Fix remaining issues with Long viewIndexIds.

2019-11-13 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 910b72b  PHOENIX-5559 Fix remaining issues with Long viewIndexIds.
910b72b is described below

commit 910b72bf5d3b51a0c30ce43d9b19c0ce089cda62
Author: Lars Hofhansl 
AuthorDate: Wed Nov 13 10:21:03 2019 -0800

PHOENIX-5559 Fix remaining issues with Long viewIndexIds.
---
 .../end2end/BaseTenantSpecificViewIndexIT.java |  10 +-
 .../org/apache/phoenix/end2end/BaseViewIT.java |   8 +-
 .../phoenix/end2end/TenantSpecificViewIndexIT.java |   4 +-
 .../java/org/apache/phoenix/end2end/UpgradeIT.java |   2 +-
 .../it/java/org/apache/phoenix/end2end/ViewIT.java |  10 +-
 .../index/ChildViewsUseParentViewIndexIT.java  |   4 +-
 .../end2end/index/GlobalIndexOptimizationIT.java   |   2 +-
 .../apache/phoenix/end2end/index/IndexUsageIT.java |   4 +-
 .../apache/phoenix/end2end/index/LocalIndexIT.java |   2 +-
 .../end2end/index/MutableIndexFailureIT.java   |   2 +-
 .../phoenix/end2end/index/ShortViewIndexIdIT.java  | 104 +
 .../apache/phoenix/end2end/index/ViewIndexIT.java  |   4 +-
 .../coprocessor/BaseScannerRegionObserver.java |  13 ++-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   6 +-
 .../org/apache/phoenix/iterate/ExplainTable.java   |   2 +-
 .../java/org/apache/phoenix/util/MetaDataUtil.java |  18 
 16 files changed, 163 insertions(+), 32 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
index 216e2d3..9860624 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
@@ -140,18 +140,18 @@ public class BaseTenantSpecificViewIndexIT extends 
SplitSystemCatalogIT {
 ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT k1, 
k2, v2 FROM " + viewName + " WHERE v2='" + valuePrefix + "v2-1'");
 if(localIndex){
 assertEquals(saltBuckets == null ? 
-"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + " 
[" + Long.toString(1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + 
valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + " 
[" + (1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix + 
"v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY\n"
 + "CLIENT MERGE SORT" :
-"CLIENT PARALLEL 3-WAY RANGE SCAN OVER " + tableName + " 
[" + Long.toString(1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + 
valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 3-WAY RANGE SCAN OVER " + tableName + " 
[" + (1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix + 
"v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY\n"
 + "CLIENT MERGE SORT", 
QueryUtil.getExplainPlan(rs));
 } else {
 String expected = saltBuckets == null ? 
-"CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [" + Long.toString(Long.MIN_VALUE + expectedIndexIdOffset) + ",'" + 
tenantId + "','" + valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [" + (Short.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" + 
valuePrefix + "v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY" :
-"CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [0," + Long.toString(Long.MIN_VALUE + expectedIndexIdOffset) + ",'" + 
tenantId + "','" + valuePrefix +
-"v2-1'] - ["+(saltBuckets.intValue()-1)+"," + 
Long.toString(Long.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" 
+ valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [0," + (Short.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" 
+ valuePrefix +
+"v2-1'] - ["+(saltBuckets.intValue()-1)+"," + 
(Short.MIN_VALUE + expectedIndexIdOffset) +

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5559 Fix remaining issues with Long viewIndexIds.

2019-11-13 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 0c5ae7b  PHOENIX-5559 Fix remaining issues with Long viewIndexIds.
0c5ae7b is described below

commit 0c5ae7b3a64f2bec6b9bc08cb8fbe204eae9528b
Author: Lars Hofhansl 
AuthorDate: Wed Nov 13 10:21:03 2019 -0800

PHOENIX-5559 Fix remaining issues with Long viewIndexIds.
---
 .../end2end/BaseTenantSpecificViewIndexIT.java |  10 +-
 .../org/apache/phoenix/end2end/BaseViewIT.java |   8 +-
 .../phoenix/end2end/TenantSpecificViewIndexIT.java |   4 +-
 .../java/org/apache/phoenix/end2end/UpgradeIT.java |   2 +-
 .../it/java/org/apache/phoenix/end2end/ViewIT.java |  10 +-
 .../index/ChildViewsUseParentViewIndexIT.java  |   4 +-
 .../end2end/index/GlobalIndexOptimizationIT.java   |   2 +-
 .../apache/phoenix/end2end/index/IndexUsageIT.java |   4 +-
 .../apache/phoenix/end2end/index/LocalIndexIT.java |   2 +-
 .../end2end/index/MutableIndexFailureIT.java   |   2 +-
 .../phoenix/end2end/index/ShortViewIndexIdIT.java  | 104 +
 .../apache/phoenix/end2end/index/ViewIndexIT.java  |   4 +-
 .../coprocessor/BaseScannerRegionObserver.java |  13 ++-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   6 +-
 .../org/apache/phoenix/iterate/ExplainTable.java   |   2 +-
 .../java/org/apache/phoenix/util/MetaDataUtil.java |  18 
 16 files changed, 163 insertions(+), 32 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
index 216e2d3..9860624 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
@@ -140,18 +140,18 @@ public class BaseTenantSpecificViewIndexIT extends 
SplitSystemCatalogIT {
 ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT k1, 
k2, v2 FROM " + viewName + " WHERE v2='" + valuePrefix + "v2-1'");
 if(localIndex){
 assertEquals(saltBuckets == null ? 
-"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + " 
[" + Long.toString(1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + 
valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + " 
[" + (1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix + 
"v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY\n"
 + "CLIENT MERGE SORT" :
-"CLIENT PARALLEL 3-WAY RANGE SCAN OVER " + tableName + " 
[" + Long.toString(1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + 
valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 3-WAY RANGE SCAN OVER " + tableName + " 
[" + (1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix + 
"v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY\n"
 + "CLIENT MERGE SORT", 
QueryUtil.getExplainPlan(rs));
 } else {
 String expected = saltBuckets == null ? 
-"CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [" + Long.toString(Long.MIN_VALUE + expectedIndexIdOffset) + ",'" + 
tenantId + "','" + valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [" + (Short.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" + 
valuePrefix + "v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY" :
-"CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [0," + Long.toString(Long.MIN_VALUE + expectedIndexIdOffset) + ",'" + 
tenantId + "','" + valuePrefix +
-"v2-1'] - ["+(saltBuckets.intValue()-1)+"," + 
Long.toString(Long.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" 
+ valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [0," + (Short.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" 
+ valuePrefix +
+"v2-1'] - ["+(saltBuckets.intValue()-1)+"," + 
(Short.MIN_VALUE + expectedIndexIdOffset) +

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5559 Fix remaining issues with Long viewIndexIds.

2019-11-13 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 2c6986e  PHOENIX-5559 Fix remaining issues with Long viewIndexIds.
2c6986e is described below

commit 2c6986efccfa7d9bb323831334816ac6b9cb7d6d
Author: Lars Hofhansl 
AuthorDate: Wed Nov 13 10:21:03 2019 -0800

PHOENIX-5559 Fix remaining issues with Long viewIndexIds.
---
 .../end2end/BaseTenantSpecificViewIndexIT.java |  10 +-
 .../org/apache/phoenix/end2end/BaseViewIT.java |   8 +-
 .../phoenix/end2end/TenantSpecificViewIndexIT.java |   4 +-
 .../java/org/apache/phoenix/end2end/UpgradeIT.java |   2 +-
 .../it/java/org/apache/phoenix/end2end/ViewIT.java |  10 +-
 .../index/ChildViewsUseParentViewIndexIT.java  |   4 +-
 .../end2end/index/GlobalIndexOptimizationIT.java   |   2 +-
 .../apache/phoenix/end2end/index/IndexUsageIT.java |   4 +-
 .../apache/phoenix/end2end/index/LocalIndexIT.java |   2 +-
 .../end2end/index/MutableIndexFailureIT.java   |   2 +-
 .../phoenix/end2end/index/ShortViewIndexIdIT.java  | 104 +
 .../apache/phoenix/end2end/index/ViewIndexIT.java  |   4 +-
 .../coprocessor/BaseScannerRegionObserver.java |  13 ++-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   6 +-
 .../org/apache/phoenix/iterate/ExplainTable.java   |   2 +-
 .../java/org/apache/phoenix/util/MetaDataUtil.java |  18 
 16 files changed, 163 insertions(+), 32 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
index 216e2d3..9860624 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
@@ -140,18 +140,18 @@ public class BaseTenantSpecificViewIndexIT extends 
SplitSystemCatalogIT {
 ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT k1, 
k2, v2 FROM " + viewName + " WHERE v2='" + valuePrefix + "v2-1'");
 if(localIndex){
 assertEquals(saltBuckets == null ? 
-"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + " 
[" + Long.toString(1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + 
valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + " 
[" + (1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix + 
"v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY\n"
 + "CLIENT MERGE SORT" :
-"CLIENT PARALLEL 3-WAY RANGE SCAN OVER " + tableName + " 
[" + Long.toString(1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + 
valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 3-WAY RANGE SCAN OVER " + tableName + " 
[" + (1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix + 
"v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY\n"
 + "CLIENT MERGE SORT", 
QueryUtil.getExplainPlan(rs));
 } else {
 String expected = saltBuckets == null ? 
-"CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [" + Long.toString(Long.MIN_VALUE + expectedIndexIdOffset) + ",'" + 
tenantId + "','" + valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [" + (Short.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" + 
valuePrefix + "v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY" :
-"CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [0," + Long.toString(Long.MIN_VALUE + expectedIndexIdOffset) + ",'" + 
tenantId + "','" + valuePrefix +
-"v2-1'] - ["+(saltBuckets.intValue()-1)+"," + 
Long.toString(Long.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" 
+ valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [0," + (Short.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" 
+ valuePrefix +
+"v2-1'] - ["+(saltBuckets.intValue()-1)+"," + 
(Short.MIN_VALUE + expectedIndexIdOffset) +

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5559 Fix remaining issues with Long viewIndexIds.

2019-11-13 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new a160314  PHOENIX-5559 Fix remaining issues with Long viewIndexIds.
a160314 is described below

commit a16031441823bacc872ec8f832594d2280c6e83b
Author: Lars Hofhansl 
AuthorDate: Wed Nov 13 10:21:03 2019 -0800

PHOENIX-5559 Fix remaining issues with Long viewIndexIds.
---
 .../end2end/BaseTenantSpecificViewIndexIT.java |  10 +-
 .../org/apache/phoenix/end2end/BaseViewIT.java |   8 +-
 .../phoenix/end2end/TenantSpecificViewIndexIT.java |   4 +-
 .../java/org/apache/phoenix/end2end/UpgradeIT.java |   2 +-
 .../it/java/org/apache/phoenix/end2end/ViewIT.java |  10 +-
 .../index/ChildViewsUseParentViewIndexIT.java  |   4 +-
 .../end2end/index/GlobalIndexOptimizationIT.java   |   2 +-
 .../apache/phoenix/end2end/index/IndexUsageIT.java |   4 +-
 .../apache/phoenix/end2end/index/LocalIndexIT.java |   2 +-
 .../end2end/index/MutableIndexFailureIT.java   |   2 +-
 .../phoenix/end2end/index/ShortViewIndexIdIT.java  | 104 +
 .../apache/phoenix/end2end/index/ViewIndexIT.java  |   4 +-
 .../coprocessor/BaseScannerRegionObserver.java |  13 ++-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   6 +-
 .../org/apache/phoenix/iterate/ExplainTable.java   |   2 +-
 .../java/org/apache/phoenix/util/MetaDataUtil.java |  18 
 16 files changed, 163 insertions(+), 32 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
index 216e2d3..9860624 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
@@ -140,18 +140,18 @@ public class BaseTenantSpecificViewIndexIT extends 
SplitSystemCatalogIT {
 ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT k1, 
k2, v2 FROM " + viewName + " WHERE v2='" + valuePrefix + "v2-1'");
 if(localIndex){
 assertEquals(saltBuckets == null ? 
-"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + " 
[" + Long.toString(1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + 
valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + " 
[" + (1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix + 
"v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY\n"
 + "CLIENT MERGE SORT" :
-"CLIENT PARALLEL 3-WAY RANGE SCAN OVER " + tableName + " 
[" + Long.toString(1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + 
valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 3-WAY RANGE SCAN OVER " + tableName + " 
[" + (1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix + 
"v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY\n"
 + "CLIENT MERGE SORT", 
QueryUtil.getExplainPlan(rs));
 } else {
 String expected = saltBuckets == null ? 
-"CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [" + Long.toString(Long.MIN_VALUE + expectedIndexIdOffset) + ",'" + 
tenantId + "','" + valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [" + (Short.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" + 
valuePrefix + "v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY" :
-"CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [0," + Long.toString(Long.MIN_VALUE + expectedIndexIdOffset) + ",'" + 
tenantId + "','" + valuePrefix +
-"v2-1'] - ["+(saltBuckets.intValue()-1)+"," + 
Long.toString(Long.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" 
+ valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [0," + (Short.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" 
+ valuePrefix +
+"v2-1'] - ["+(saltBuckets.intValue()-1)+"," + 
(Short.MIN_VALUE + expectedIndexIdOffset) +

[phoenix] branch master updated: PHOENIX-5533 Creating a view or index with a 4.14 client and 4.15.0 server fails with a NullPointerException.

2019-10-24 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new f3f722e  PHOENIX-5533 Creating a view or index with a 4.14 client and 
4.15.0 server fails with a NullPointerException.
f3f722e is described below

commit f3f722e4f29293885f1854cca9dd4cd37e6ff085
Author: Lars Hofhansl 
AuthorDate: Thu Oct 24 08:47:44 2019 -0700

PHOENIX-5533 Creating a view or index with a 4.14 client and 4.15.0 server 
fails with a NullPointerException.
---
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  | 46 ++
 1 file changed, 46 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 8c80cd3..312602b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1735,6 +1735,45 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements RegionCopr
 byte[][] parentPhysicalSchemaTableNames = new byte[3][];
 getParentAndPhysicalNames(tableMetadata, 
parentSchemaTableNames, parentPhysicalSchemaTableNames);
 if (parentPhysicalSchemaTableNames[2] != null) {
+if (parentTable == null) {
+// This is needed when we connect with a 4.14 client to
+// a 4.15.0+ server.
+// In that case we need to resolve the parent table on
+// the server.
+parentTable = doGetTable(ByteUtil.EMPTY_BYTE_ARRAY,
+parentPhysicalSchemaTableNames[1],
+parentPhysicalSchemaTableNames[2], 
clientTimeStamp, clientVersion);
+if (parentTable == null) {
+builder.setReturnCode(
+
MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
+
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
+done.run(builder.build());
+return;
+}
+if (parentSchemaTableNames[2] != null
+&& Bytes.compareTo(parentSchemaTableNames[2],
+parentPhysicalSchemaTableNames[2]) != 
0) {
+// if view is created on view
+byte[] tenantId = parentSchemaTableNames[0] == null
+? ByteUtil.EMPTY_BYTE_ARRAY
+: parentSchemaTableNames[0];
+parentTable = doGetTable(tenantId, 
parentSchemaTableNames[1],
+parentSchemaTableNames[2], 
clientTimeStamp, clientVersion);
+if (parentTable == null) {
+// it could be a global view
+parentTable = 
doGetTable(ByteUtil.EMPTY_BYTE_ARRAY,
+parentSchemaTableNames[1], 
parentSchemaTableNames[2],
+clientTimeStamp, clientVersion);
+}
+}
+if (parentTable == null) {
+builder.setReturnCode(
+
MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
+
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
+done.run(builder.build());
+return;
+}
+}
 parentTableKey = 
SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY,
 parentPhysicalSchemaTableNames[1], 
parentPhysicalSchemaTableNames[2]);
 cParentPhysicalName = 
parentTable.getPhysicalName().getBytes();
@@ -1757,6 +1796,13 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements RegionCopr
  */
 parentTableName = 
MetaDataUtil.getParentTableName(tableMetadata);
 parentTableKey = SchemaUtil.getTableKey(tenantIdBytes, 
parentSchemaName, parentTableName);
+if (parentTable == null) {
+// This is needed when we connect with a 4.14 client to a 
4.15.0+ server.
+// In that case we need to resolve the parent table on the 
server.
+parentTable =
+doGetTable(tenantIdBytes, parentSchemaName, 
parentTableName, clientTimeStamp, clientVersion);

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5533 Creating a view or index with a 4.14 client and 4.15.0 server fails with a NullPointerException.

2019-10-24 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 2ed532f  PHOENIX-5533 Creating a view or index with a 4.14 client and 
4.15.0 server fails with a NullPointerException.
2ed532f is described below

commit 2ed532f7d1e6574af246abe62ff92d0ff7e4f8b1
Author: Lars Hofhansl 
AuthorDate: Thu Oct 24 08:47:44 2019 -0700

PHOENIX-5533 Creating a view or index with a 4.14 client and 4.15.0 server 
fails with a NullPointerException.
---
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  | 46 ++
 1 file changed, 46 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 6df5bf8..7558b8d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1730,6 +1730,45 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 byte[][] parentPhysicalSchemaTableNames = new byte[3][];
 getParentAndPhysicalNames(tableMetadata, 
parentSchemaTableNames, parentPhysicalSchemaTableNames);
 if (parentPhysicalSchemaTableNames[2] != null) {
+if (parentTable == null) {
+// This is needed when we connect with a 4.14 client to
+// a 4.15.0+ server.
+// In that case we need to resolve the parent table on
+// the server.
+parentTable = doGetTable(ByteUtil.EMPTY_BYTE_ARRAY,
+parentPhysicalSchemaTableNames[1],
+parentPhysicalSchemaTableNames[2], 
clientTimeStamp, clientVersion);
+if (parentTable == null) {
+builder.setReturnCode(
+
MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
+
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
+done.run(builder.build());
+return;
+}
+if (parentSchemaTableNames[2] != null
+&& Bytes.compareTo(parentSchemaTableNames[2],
+parentPhysicalSchemaTableNames[2]) != 
0) {
+// if view is created on view
+byte[] tenantId = parentSchemaTableNames[0] == null
+? ByteUtil.EMPTY_BYTE_ARRAY
+: parentSchemaTableNames[0];
+parentTable = doGetTable(tenantId, 
parentSchemaTableNames[1],
+parentSchemaTableNames[2], 
clientTimeStamp, clientVersion);
+if (parentTable == null) {
+// it could be a global view
+parentTable = 
doGetTable(ByteUtil.EMPTY_BYTE_ARRAY,
+parentSchemaTableNames[1], 
parentSchemaTableNames[2],
+clientTimeStamp, clientVersion);
+}
+}
+if (parentTable == null) {
+builder.setReturnCode(
+
MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
+
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
+done.run(builder.build());
+return;
+}
+}
 parentTableKey = 
SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY,
 parentPhysicalSchemaTableNames[1], 
parentPhysicalSchemaTableNames[2]);
 cParentPhysicalName = 
parentTable.getPhysicalName().getBytes();
@@ -1752,6 +1791,13 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
  */
 parentTableName = 
MetaDataUtil.getParentTableName(tableMetadata);
 parentTableKey = SchemaUtil.getTableKey(tenantIdBytes, 
parentSchemaName, parentTableName);
+if (parentTable == null) {
+// This is needed when we connect with a 4.14 client to a 
4.15.0+ server.
+// In that case we need to resolve the parent table on the 
server.
+parentTable =
+doGetTable(tenantIdBytes, parentSchemaName, 
parentTableName, clientTimeStamp, clientVersion);

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5533 Creating a view or index with a 4.14 client and 4.15.0 server fails with a NullPointerException.

2019-10-24 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new dd662b1  PHOENIX-5533 Creating a view or index with a 4.14 client and 
4.15.0 server fails with a NullPointerException.
dd662b1 is described below

commit dd662b1b92971ed3a377f49736759f375164e445
Author: Lars Hofhansl 
AuthorDate: Thu Oct 24 08:47:44 2019 -0700

PHOENIX-5533 Creating a view or index with a 4.14 client and 4.15.0 server 
fails with a NullPointerException.
---
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  | 46 ++
 1 file changed, 46 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 6df5bf8..7558b8d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1730,6 +1730,45 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 byte[][] parentPhysicalSchemaTableNames = new byte[3][];
 getParentAndPhysicalNames(tableMetadata, 
parentSchemaTableNames, parentPhysicalSchemaTableNames);
 if (parentPhysicalSchemaTableNames[2] != null) {
+if (parentTable == null) {
+// This is needed when we connect with a 4.14 client to
+// a 4.15.0+ server.
+// In that case we need to resolve the parent table on
+// the server.
+parentTable = doGetTable(ByteUtil.EMPTY_BYTE_ARRAY,
+parentPhysicalSchemaTableNames[1],
+parentPhysicalSchemaTableNames[2], 
clientTimeStamp, clientVersion);
+if (parentTable == null) {
+builder.setReturnCode(
+
MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
+
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
+done.run(builder.build());
+return;
+}
+if (parentSchemaTableNames[2] != null
+&& Bytes.compareTo(parentSchemaTableNames[2],
+parentPhysicalSchemaTableNames[2]) != 
0) {
+// if view is created on view
+byte[] tenantId = parentSchemaTableNames[0] == null
+? ByteUtil.EMPTY_BYTE_ARRAY
+: parentSchemaTableNames[0];
+parentTable = doGetTable(tenantId, 
parentSchemaTableNames[1],
+parentSchemaTableNames[2], 
clientTimeStamp, clientVersion);
+if (parentTable == null) {
+// it could be a global view
+parentTable = 
doGetTable(ByteUtil.EMPTY_BYTE_ARRAY,
+parentSchemaTableNames[1], 
parentSchemaTableNames[2],
+clientTimeStamp, clientVersion);
+}
+}
+if (parentTable == null) {
+builder.setReturnCode(
+
MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
+
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
+done.run(builder.build());
+return;
+}
+}
 parentTableKey = 
SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY,
 parentPhysicalSchemaTableNames[1], 
parentPhysicalSchemaTableNames[2]);
 cParentPhysicalName = 
parentTable.getPhysicalName().getBytes();
@@ -1752,6 +1791,13 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
  */
 parentTableName = 
MetaDataUtil.getParentTableName(tableMetadata);
 parentTableKey = SchemaUtil.getTableKey(tenantIdBytes, 
parentSchemaName, parentTableName);
+if (parentTable == null) {
+// This is needed when we connect with a 4.14 client to a 
4.15.0+ server.
+// In that case we need to resolve the parent table on the 
server.
+parentTable =
+doGetTable(tenantIdBytes, parentSchemaName, 
parentTableName, clientTimeStamp, clientVersion);

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5533 Creating a view or index with a 4.14 client and 4.15.0 server fails with a NullPointerException.

2019-10-24 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 0b9a039  PHOENIX-5533 Creating a view or index with a 4.14 client and 
4.15.0 server fails with a NullPointerException.
0b9a039 is described below

commit 0b9a0395554dcf72ece54c131fb628e7c3329902
Author: Lars Hofhansl 
AuthorDate: Thu Oct 24 08:47:44 2019 -0700

PHOENIX-5533 Creating a view or index with a 4.14 client and 4.15.0 server 
fails with a NullPointerException.
---
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  | 46 ++
 1 file changed, 46 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 6df5bf8..7558b8d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1730,6 +1730,45 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 byte[][] parentPhysicalSchemaTableNames = new byte[3][];
 getParentAndPhysicalNames(tableMetadata, 
parentSchemaTableNames, parentPhysicalSchemaTableNames);
 if (parentPhysicalSchemaTableNames[2] != null) {
+if (parentTable == null) {
+// This is needed when we connect with a 4.14 client to
+// a 4.15.0+ server.
+// In that case we need to resolve the parent table on
+// the server.
+parentTable = doGetTable(ByteUtil.EMPTY_BYTE_ARRAY,
+parentPhysicalSchemaTableNames[1],
+parentPhysicalSchemaTableNames[2], 
clientTimeStamp, clientVersion);
+if (parentTable == null) {
+builder.setReturnCode(
+
MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
+
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
+done.run(builder.build());
+return;
+}
+if (parentSchemaTableNames[2] != null
+&& Bytes.compareTo(parentSchemaTableNames[2],
+parentPhysicalSchemaTableNames[2]) != 
0) {
+// if view is created on view
+byte[] tenantId = parentSchemaTableNames[0] == null
+? ByteUtil.EMPTY_BYTE_ARRAY
+: parentSchemaTableNames[0];
+parentTable = doGetTable(tenantId, 
parentSchemaTableNames[1],
+parentSchemaTableNames[2], 
clientTimeStamp, clientVersion);
+if (parentTable == null) {
+// it could be a global view
+parentTable = 
doGetTable(ByteUtil.EMPTY_BYTE_ARRAY,
+parentSchemaTableNames[1], 
parentSchemaTableNames[2],
+clientTimeStamp, clientVersion);
+}
+}
+if (parentTable == null) {
+builder.setReturnCode(
+
MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
+
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
+done.run(builder.build());
+return;
+}
+}
 parentTableKey = 
SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY,
 parentPhysicalSchemaTableNames[1], 
parentPhysicalSchemaTableNames[2]);
 cParentPhysicalName = 
parentTable.getPhysicalName().getBytes();
@@ -1752,6 +1791,13 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
  */
 parentTableName = 
MetaDataUtil.getParentTableName(tableMetadata);
 parentTableKey = SchemaUtil.getTableKey(tenantIdBytes, 
parentSchemaName, parentTableName);
+if (parentTable == null) {
+// This is needed when we connect with a 4.14 client to a 
4.15.0+ server.
+// In that case we need to resolve the parent table on the 
server.
+parentTable =
+doGetTable(tenantIdBytes, parentSchemaName, 
parentTableName, clientTimeStamp, clientVersion);

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5523 Prepare for newly released HBase 1.5.0.

2019-10-14 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 7fb4528  PHOENIX-5523 Prepare for newly released HBase 1.5.0.
7fb4528 is described below

commit 7fb4528ea47bcc8ae0f71268d0690e2cd5ac0c9c
Author: Lars Hofhansl 
AuthorDate: Mon Oct 14 21:41:05 2019 -0700

PHOENIX-5523 Prepare for newly released HBase 1.5.0.
---
 .../java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java  | 5 +
 pom.xml  | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
index 9724126..6855da9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
@@ -680,6 +680,11 @@ public class DelegateRegionObserver implements 
RegionObserver {
 }
 
 @Override
+public void preWALAppend(ObserverContext 
ctx, WALKey key,
+WALEdit edit) throws IOException {
+}
+
+@Override
 public InternalScanner 
preFlushScannerOpen(ObserverContext c,
 Store store, KeyValueScanner memstoreScanner, InternalScanner s, 
long readPoint)
 throws IOException {
diff --git a/pom.xml b/pom.xml
index ff14eaf..ad8e9c4 100644
--- a/pom.xml
+++ b/pom.xml
@@ -79,7 +79,7 @@
 ${project.basedir}
 
 
-1.5.0-SNAPSHOT
+1.5.0
 2.7.5
 
 



[phoenix] branch master updated: PHOENIX-5499 Upgrading from 4.14.3 client to 4.15.0 client should remove parent->child links from SYSTEM.CATALOG.

2019-10-05 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 74f8464  PHOENIX-5499 Upgrading from 4.14.3 client to 4.15.0 client 
should remove parent->child links from SYSTEM.CATALOG.
74f8464 is described below

commit 74f8464108a98b476c1c39b12150ae37861b7452
Author: Lars Hofhansl 
AuthorDate: Sat Oct 5 13:39:14 2019 -0700

PHOENIX-5499 Upgrading from 4.14.3 client to 4.15.0 client should remove 
parent->child links from SYSTEM.CATALOG.
---
 .../src/main/java/org/apache/phoenix/execute/MutationState.java | 6 +-
 .../src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java| 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 44760a8..d887468 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -161,7 +161,11 @@ public class MutationState implements SQLCloseable {
 }
 
 public MutationState(MutationState mutationState) {
-this(mutationState.maxSize, mutationState.maxSizeBytes, 
mutationState.connection, true, mutationState
+this(mutationState, mutationState.connection);
+}
+
+public MutationState(MutationState mutationState, PhoenixConnection 
connection) {
+this(mutationState.maxSize, mutationState.maxSizeBytes, connection, 
true, mutationState
 .getPhoenixTransactionContext());
 }
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index d668758..988a7c6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -372,7 +372,7 @@ public class PhoenixConnection implements Connection, 
MetaDataMutated, SQLClosea
 this.isRequestLevelMetricsEnabled = 
JDBCUtil.isCollectingRequestLevelMetricsEnabled(url, info,
 this.services.getProps());
 this.mutationState = mutationState == null ? newMutationState(maxSize,
-maxSizeBytes) : new MutationState(mutationState);
+maxSizeBytes) : new MutationState(mutationState, this);
 this.metaData = metaData;
 this.metaData.pruneTables(pruner);
 this.metaData.pruneFunctions(pruner);



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5499 Upgrading from 4.14.3 client to 4.15.0 client should remove parent->child links from SYSTEM.CATALOG.

2019-10-05 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 86af6ea  PHOENIX-5499 Upgrading from 4.14.3 client to 4.15.0 client 
should remove parent->child links from SYSTEM.CATALOG.
86af6ea is described below

commit 86af6ea2ccd24cfcba4fe8901c422afb55bf9751
Author: Lars Hofhansl 
AuthorDate: Sat Oct 5 13:39:14 2019 -0700

PHOENIX-5499 Upgrading from 4.14.3 client to 4.15.0 client should remove 
parent->child links from SYSTEM.CATALOG.
---
 .../src/main/java/org/apache/phoenix/execute/MutationState.java | 6 +-
 .../src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java| 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 856c6bc55..434d1f7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -161,7 +161,11 @@ public class MutationState implements SQLCloseable {
 }
 
 public MutationState(MutationState mutationState) {
-this(mutationState.maxSize, mutationState.maxSizeBytes, 
mutationState.connection, true, mutationState
+this(mutationState, mutationState.connection);
+}
+
+public MutationState(MutationState mutationState, PhoenixConnection 
connection) {
+this(mutationState.maxSize, mutationState.maxSizeBytes, connection, 
true, mutationState
 .getPhoenixTransactionContext());
 }
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index d668758..988a7c6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -372,7 +372,7 @@ public class PhoenixConnection implements Connection, 
MetaDataMutated, SQLClosea
 this.isRequestLevelMetricsEnabled = 
JDBCUtil.isCollectingRequestLevelMetricsEnabled(url, info,
 this.services.getProps());
 this.mutationState = mutationState == null ? newMutationState(maxSize,
-maxSizeBytes) : new MutationState(mutationState);
+maxSizeBytes) : new MutationState(mutationState, this);
 this.metaData = metaData;
 this.metaData.pruneTables(pruner);
 this.metaData.pruneFunctions(pruner);



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5499 Upgrading from 4.14.3 client to 4.15.0 client should remove parent->child links from SYSTEM.CATALOG.

2019-10-05 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new d99b57c  PHOENIX-5499 Upgrading from 4.14.3 client to 4.15.0 client 
should remove parent->child links from SYSTEM.CATALOG.
d99b57c is described below

commit d99b57cef03ff31d5b0f4a9de775aa8bf3f5b850
Author: Lars Hofhansl 
AuthorDate: Sat Oct 5 13:39:14 2019 -0700

PHOENIX-5499 Upgrading from 4.14.3 client to 4.15.0 client should remove 
parent->child links from SYSTEM.CATALOG.
---
 .../src/main/java/org/apache/phoenix/execute/MutationState.java | 6 +-
 .../src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java| 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 856c6bc55..434d1f7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -161,7 +161,11 @@ public class MutationState implements SQLCloseable {
 }
 
 public MutationState(MutationState mutationState) {
-this(mutationState.maxSize, mutationState.maxSizeBytes, 
mutationState.connection, true, mutationState
+this(mutationState, mutationState.connection);
+}
+
+public MutationState(MutationState mutationState, PhoenixConnection 
connection) {
+this(mutationState.maxSize, mutationState.maxSizeBytes, connection, 
true, mutationState
 .getPhoenixTransactionContext());
 }
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index d668758..988a7c6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -372,7 +372,7 @@ public class PhoenixConnection implements Connection, 
MetaDataMutated, SQLClosea
 this.isRequestLevelMetricsEnabled = 
JDBCUtil.isCollectingRequestLevelMetricsEnabled(url, info,
 this.services.getProps());
 this.mutationState = mutationState == null ? newMutationState(maxSize,
-maxSizeBytes) : new MutationState(mutationState);
+maxSizeBytes) : new MutationState(mutationState, this);
 this.metaData = metaData;
 this.metaData.pruneTables(pruner);
 this.metaData.pruneFunctions(pruner);



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5499 Upgrading from 4.14.3 client to 4.15.0 client should remove parent->child links from SYSTEM.CATALOG.

2019-10-05 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new a9dd41c  PHOENIX-5499 Upgrading from 4.14.3 client to 4.15.0 client 
should remove parent->child links from SYSTEM.CATALOG.
a9dd41c is described below

commit a9dd41c3744f549f9288aa5b9c5b9ffda20b0102
Author: Lars Hofhansl 
AuthorDate: Sat Oct 5 13:39:14 2019 -0700

PHOENIX-5499 Upgrading from 4.14.3 client to 4.15.0 client should remove 
parent->child links from SYSTEM.CATALOG.
---
 .../src/main/java/org/apache/phoenix/execute/MutationState.java | 6 +-
 .../src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java| 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 856c6bc55..434d1f7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -161,7 +161,11 @@ public class MutationState implements SQLCloseable {
 }
 
 public MutationState(MutationState mutationState) {
-this(mutationState.maxSize, mutationState.maxSizeBytes, 
mutationState.connection, true, mutationState
+this(mutationState, mutationState.connection);
+}
+
+public MutationState(MutationState mutationState, PhoenixConnection 
connection) {
+this(mutationState.maxSize, mutationState.maxSizeBytes, connection, 
true, mutationState
 .getPhoenixTransactionContext());
 }
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index d668758..988a7c6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -372,7 +372,7 @@ public class PhoenixConnection implements Connection, 
MetaDataMutated, SQLClosea
 this.isRequestLevelMetricsEnabled = 
JDBCUtil.isCollectingRequestLevelMetricsEnabled(url, info,
 this.services.getProps());
 this.mutationState = mutationState == null ? newMutationState(maxSize,
-maxSizeBytes) : new MutationState(mutationState);
+maxSizeBytes) : new MutationState(mutationState, this);
 this.metaData = metaData;
 this.metaData.pruneTables(pruner);
 this.metaData.pruneFunctions(pruner);



[phoenix] branch master updated: Revert "PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since the author did not add a license header."

2019-09-27 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 3bea413  Revert "PHOENIX-5463 Remove AndExpressionTest and 
OrExpressionTest since the author did not add a license header."
3bea413 is described below

commit 3bea4131945943189edfe09fd6019c04552f8a19
Author: Lars Hofhansl 
AuthorDate: Fri Sep 27 11:49:06 2019 -0700

Revert "PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since 
the author did not add a license header."

This reverts commit d3e16ae7ab3e4a328d523dd7aa4b0b740109ae7f.
---
 .../phoenix/expression/AndExpressionTest.java  | 314 +
 .../phoenix/expression/OrExpressionTest.java   | 310 
 2 files changed, 624 insertions(+)

diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
new file mode 100644
index 000..a223a19
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.PBaseColumn;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PDataType;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class AndExpressionTest {
+
+private AndExpression createAnd(Expression lhs, Expression rhs) {
+return new AndExpression(Arrays.asList(lhs, rhs));
+}
+
+private AndExpression createAnd(Boolean x, Boolean y) {
+return createAnd(LiteralExpression.newConstant(x), 
LiteralExpression.newConstant(y));
+}
+
+private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean 
rhs) {
+AndExpression and = createAnd(lhs, rhs);
+ImmutableBytesWritable out = new ImmutableBytesWritable();
+MultiKeyValueTuple tuple = new MultiKeyValueTuple();
+boolean success = and.evaluate(tuple, out);
+assertTrue(success);
+assertEquals(expected, PBoolean.INSTANCE.toObject(out));
+}
+
+// Evaluating AND when values of both sides are known should immediately 
succeed
+// and return the same result regardless of order.
+private void testImmediate(Boolean expected, Boolean a, Boolean b) {
+testImmediateSingle(expected, a, b);
+testImmediateSingle(expected, b, a);
+}
+
+private PColumn pcolumn(final String name) {
+return new PBaseColumn() {
+@Override public PName getName() {
+return PNameFactory.newName(name);
+}
+
+@Override public PDataType getDataType() {
+return PBoolean.INSTANCE;
+}
+
+@Override public PName getFamilyName() {
+return 
PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY);
+}
+
+@Override public int getPosition() {
+return 0;
+}
+
+@Override public Integer getArraySize() {
+return null;
+}
+
+@Override public byte[] getViewConstant() {
+return new byte[0];
+}
+
+@Override public boolean isViewReferenced() {
+return false;
+ 

[phoenix] branch 4.x-HBase-1.3 updated: Revert "PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since the author did not add a license header."

2019-09-27 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new c554d7d  Revert "PHOENIX-5463 Remove AndExpressionTest and 
OrExpressionTest since the author did not add a license header."
c554d7d is described below

commit c554d7da306115ac7a40cd55763fb62fcb7c0166
Author: Lars Hofhansl 
AuthorDate: Fri Sep 27 11:48:00 2019 -0700

Revert "PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since 
the author did not add a license header."

This reverts commit 9694bbb241117edb8b3711cfaee5e22fa57ca14c.
---
 .../phoenix/expression/AndExpressionTest.java  | 314 +
 .../phoenix/expression/OrExpressionTest.java   | 310 
 2 files changed, 624 insertions(+)

diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
new file mode 100644
index 000..a223a19
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.PBaseColumn;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PDataType;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class AndExpressionTest {
+
+private AndExpression createAnd(Expression lhs, Expression rhs) {
+return new AndExpression(Arrays.asList(lhs, rhs));
+}
+
+private AndExpression createAnd(Boolean x, Boolean y) {
+return createAnd(LiteralExpression.newConstant(x), 
LiteralExpression.newConstant(y));
+}
+
+private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean 
rhs) {
+AndExpression and = createAnd(lhs, rhs);
+ImmutableBytesWritable out = new ImmutableBytesWritable();
+MultiKeyValueTuple tuple = new MultiKeyValueTuple();
+boolean success = and.evaluate(tuple, out);
+assertTrue(success);
+assertEquals(expected, PBoolean.INSTANCE.toObject(out));
+}
+
+// Evaluating AND when values of both sides are known should immediately 
succeed
+// and return the same result regardless of order.
+private void testImmediate(Boolean expected, Boolean a, Boolean b) {
+testImmediateSingle(expected, a, b);
+testImmediateSingle(expected, b, a);
+}
+
+private PColumn pcolumn(final String name) {
+return new PBaseColumn() {
+@Override public PName getName() {
+return PNameFactory.newName(name);
+}
+
+@Override public PDataType getDataType() {
+return PBoolean.INSTANCE;
+}
+
+@Override public PName getFamilyName() {
+return 
PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY);
+}
+
+@Override public int getPosition() {
+return 0;
+}
+
+@Override public Integer getArraySize() {
+return null;
+}
+
+@Override public byte[] getViewConstant() {
+return new byte[0];
+}
+
+@Override public boolean isViewRefe

[phoenix] branch 4.x-HBase-1.4 updated: Revert "PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since the author did not add a license header."

2019-09-27 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 334b320  Revert "PHOENIX-5463 Remove AndExpressionTest and 
OrExpressionTest since the author did not add a license header."
334b320 is described below

commit 334b320c1d4b132ad08cf8af58b56a54f16f293f
Author: Lars Hofhansl 
AuthorDate: Fri Sep 27 11:48:31 2019 -0700

Revert "PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since 
the author did not add a license header."

This reverts commit a4159c1ba013100a6891303183b4ca0e7d577e6a.
---
 .../phoenix/expression/AndExpressionTest.java  | 314 +
 .../phoenix/expression/OrExpressionTest.java   | 310 
 2 files changed, 624 insertions(+)

diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
new file mode 100644
index 000..a223a19
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.PBaseColumn;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PDataType;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class AndExpressionTest {
+
+private AndExpression createAnd(Expression lhs, Expression rhs) {
+return new AndExpression(Arrays.asList(lhs, rhs));
+}
+
+private AndExpression createAnd(Boolean x, Boolean y) {
+return createAnd(LiteralExpression.newConstant(x), 
LiteralExpression.newConstant(y));
+}
+
+private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean 
rhs) {
+AndExpression and = createAnd(lhs, rhs);
+ImmutableBytesWritable out = new ImmutableBytesWritable();
+MultiKeyValueTuple tuple = new MultiKeyValueTuple();
+boolean success = and.evaluate(tuple, out);
+assertTrue(success);
+assertEquals(expected, PBoolean.INSTANCE.toObject(out));
+}
+
+// Evaluating AND when values of both sides are known should immediately 
succeed
+// and return the same result regardless of order.
+private void testImmediate(Boolean expected, Boolean a, Boolean b) {
+testImmediateSingle(expected, a, b);
+testImmediateSingle(expected, b, a);
+}
+
+private PColumn pcolumn(final String name) {
+return new PBaseColumn() {
+@Override public PName getName() {
+return PNameFactory.newName(name);
+}
+
+@Override public PDataType getDataType() {
+return PBoolean.INSTANCE;
+}
+
+@Override public PName getFamilyName() {
+return 
PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY);
+}
+
+@Override public int getPosition() {
+return 0;
+}
+
+@Override public Integer getArraySize() {
+return null;
+}
+
+@Override public byte[] getViewConstant() {
+return new byte[0];
+}
+
+@Override public boolean isViewRefe

[phoenix] branch 4.x-HBase-1.5 updated: Revert "PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since the author did not add a license header."

2019-09-27 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 7de8f5f  Revert "PHOENIX-5463 Remove AndExpressionTest and 
OrExpressionTest since the author did not add a license header."
7de8f5f is described below

commit 7de8f5f0c95c48172beff6920134087003a4a419
Author: Lars Hofhansl 
AuthorDate: Fri Sep 27 11:47:04 2019 -0700

Revert "PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since 
the author did not add a license header."

This reverts commit 82ee8d5917da6a56ea124f281dbbecf9fed4571d.
---
 .../phoenix/expression/AndExpressionTest.java  | 314 +
 .../phoenix/expression/OrExpressionTest.java   | 310 
 2 files changed, 624 insertions(+)

diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
new file mode 100644
index 000..a223a19
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.PBaseColumn;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PDataType;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class AndExpressionTest {
+
+private AndExpression createAnd(Expression lhs, Expression rhs) {
+return new AndExpression(Arrays.asList(lhs, rhs));
+}
+
+private AndExpression createAnd(Boolean x, Boolean y) {
+return createAnd(LiteralExpression.newConstant(x), 
LiteralExpression.newConstant(y));
+}
+
+private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean 
rhs) {
+AndExpression and = createAnd(lhs, rhs);
+ImmutableBytesWritable out = new ImmutableBytesWritable();
+MultiKeyValueTuple tuple = new MultiKeyValueTuple();
+boolean success = and.evaluate(tuple, out);
+assertTrue(success);
+assertEquals(expected, PBoolean.INSTANCE.toObject(out));
+}
+
+// Evaluating AND when values of both sides are known should immediately 
succeed
+// and return the same result regardless of order.
+private void testImmediate(Boolean expected, Boolean a, Boolean b) {
+testImmediateSingle(expected, a, b);
+testImmediateSingle(expected, b, a);
+}
+
+private PColumn pcolumn(final String name) {
+return new PBaseColumn() {
+@Override public PName getName() {
+return PNameFactory.newName(name);
+}
+
+@Override public PDataType getDataType() {
+return PBoolean.INSTANCE;
+}
+
+@Override public PName getFamilyName() {
+return 
PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY);
+}
+
+@Override public int getPosition() {
+return 0;
+}
+
+@Override public Integer getArraySize() {
+return null;
+}
+
+@Override public byte[] getViewConstant() {
+return new byte[0];
+}
+
+@Override public boolean isViewRefe

[phoenix] branch master updated: PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since the author did not add a license header.

2019-09-25 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new d3e16ae  PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest 
since the author did not add a license header.
d3e16ae is described below

commit d3e16ae7ab3e4a328d523dd7aa4b0b740109ae7f
Author: Lars Hofhansl 
AuthorDate: Wed Sep 25 10:32:29 2019 -0700

PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since the author 
did not add a license header.
---
 .../phoenix/expression/AndExpressionTest.java  | 314 -
 .../phoenix/expression/OrExpressionTest.java   | 310 
 2 files changed, 624 deletions(-)

diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
deleted file mode 100644
index a223a19..000
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.expression;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.PBaseColumn;
-import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.PName;
-import org.apache.phoenix.schema.PNameFactory;
-import org.apache.phoenix.schema.SortOrder;
-import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
-import org.apache.phoenix.schema.types.PBoolean;
-import org.apache.phoenix.schema.types.PDataType;
-import org.junit.Test;
-
-import java.util.Arrays;
-import java.util.Collections;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-public class AndExpressionTest {
-
-private AndExpression createAnd(Expression lhs, Expression rhs) {
-return new AndExpression(Arrays.asList(lhs, rhs));
-}
-
-private AndExpression createAnd(Boolean x, Boolean y) {
-return createAnd(LiteralExpression.newConstant(x), 
LiteralExpression.newConstant(y));
-}
-
-private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean 
rhs) {
-AndExpression and = createAnd(lhs, rhs);
-ImmutableBytesWritable out = new ImmutableBytesWritable();
-MultiKeyValueTuple tuple = new MultiKeyValueTuple();
-boolean success = and.evaluate(tuple, out);
-assertTrue(success);
-assertEquals(expected, PBoolean.INSTANCE.toObject(out));
-}
-
-// Evaluating AND when values of both sides are known should immediately 
succeed
-// and return the same result regardless of order.
-private void testImmediate(Boolean expected, Boolean a, Boolean b) {
-testImmediateSingle(expected, a, b);
-testImmediateSingle(expected, b, a);
-}
-
-private PColumn pcolumn(final String name) {
-return new PBaseColumn() {
-@Override public PName getName() {
-return PNameFactory.newName(name);
-}
-
-@Override public PDataType getDataType() {
-return PBoolean.INSTANCE;
-}
-
-@Override public PName getFamilyName() {
-return 
PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY);
-}
-
-@Override public int getPosition() {
-return 0;
-}
-
-@Override public Integer getArraySize() {
-return null;
-}
-
-@Override public byte[] getViewConstant() {
-return new byte[0];
-}
-
-@Override public boolean isViewReferenced() {
-return false;
-}
-
-@Override public String getExpressionStr() {
-   

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since the author did not add a license header.

2019-09-25 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new a4159c1  PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest 
since the author did not add a license header.
a4159c1 is described below

commit a4159c1ba013100a6891303183b4ca0e7d577e6a
Author: Lars Hofhansl 
AuthorDate: Wed Sep 25 10:32:29 2019 -0700

PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since the author 
did not add a license header.
---
 .../phoenix/expression/AndExpressionTest.java  | 314 -
 .../phoenix/expression/OrExpressionTest.java   | 310 
 2 files changed, 624 deletions(-)

diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
deleted file mode 100644
index a223a19..000
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.expression;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.PBaseColumn;
-import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.PName;
-import org.apache.phoenix.schema.PNameFactory;
-import org.apache.phoenix.schema.SortOrder;
-import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
-import org.apache.phoenix.schema.types.PBoolean;
-import org.apache.phoenix.schema.types.PDataType;
-import org.junit.Test;
-
-import java.util.Arrays;
-import java.util.Collections;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-public class AndExpressionTest {
-
-private AndExpression createAnd(Expression lhs, Expression rhs) {
-return new AndExpression(Arrays.asList(lhs, rhs));
-}
-
-private AndExpression createAnd(Boolean x, Boolean y) {
-return createAnd(LiteralExpression.newConstant(x), 
LiteralExpression.newConstant(y));
-}
-
-private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean 
rhs) {
-AndExpression and = createAnd(lhs, rhs);
-ImmutableBytesWritable out = new ImmutableBytesWritable();
-MultiKeyValueTuple tuple = new MultiKeyValueTuple();
-boolean success = and.evaluate(tuple, out);
-assertTrue(success);
-assertEquals(expected, PBoolean.INSTANCE.toObject(out));
-}
-
-// Evaluating AND when values of both sides are known should immediately 
succeed
-// and return the same result regardless of order.
-private void testImmediate(Boolean expected, Boolean a, Boolean b) {
-testImmediateSingle(expected, a, b);
-testImmediateSingle(expected, b, a);
-}
-
-private PColumn pcolumn(final String name) {
-return new PBaseColumn() {
-@Override public PName getName() {
-return PNameFactory.newName(name);
-}
-
-@Override public PDataType getDataType() {
-return PBoolean.INSTANCE;
-}
-
-@Override public PName getFamilyName() {
-return 
PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY);
-}
-
-@Override public int getPosition() {
-return 0;
-}
-
-@Override public Integer getArraySize() {
-return null;
-}
-
-@Override public byte[] getViewConstant() {
-return new byte[0];
-}
-
-@Override public boolean isViewReferenced() {
-return false;
-}
-
-@Override public String getExpressionStr() {
-   

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since the author did not add a license header.

2019-09-25 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 9694bbb  PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest 
since the author did not add a license header.
9694bbb is described below

commit 9694bbb241117edb8b3711cfaee5e22fa57ca14c
Author: Lars Hofhansl 
AuthorDate: Wed Sep 25 10:32:29 2019 -0700

PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since the author 
did not add a license header.
---
 .../phoenix/expression/AndExpressionTest.java  | 314 -
 .../phoenix/expression/OrExpressionTest.java   | 310 
 2 files changed, 624 deletions(-)

diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
deleted file mode 100644
index a223a19..000
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.expression;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.PBaseColumn;
-import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.PName;
-import org.apache.phoenix.schema.PNameFactory;
-import org.apache.phoenix.schema.SortOrder;
-import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
-import org.apache.phoenix.schema.types.PBoolean;
-import org.apache.phoenix.schema.types.PDataType;
-import org.junit.Test;
-
-import java.util.Arrays;
-import java.util.Collections;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-public class AndExpressionTest {
-
-private AndExpression createAnd(Expression lhs, Expression rhs) {
-return new AndExpression(Arrays.asList(lhs, rhs));
-}
-
-private AndExpression createAnd(Boolean x, Boolean y) {
-return createAnd(LiteralExpression.newConstant(x), 
LiteralExpression.newConstant(y));
-}
-
-private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean 
rhs) {
-AndExpression and = createAnd(lhs, rhs);
-ImmutableBytesWritable out = new ImmutableBytesWritable();
-MultiKeyValueTuple tuple = new MultiKeyValueTuple();
-boolean success = and.evaluate(tuple, out);
-assertTrue(success);
-assertEquals(expected, PBoolean.INSTANCE.toObject(out));
-}
-
-// Evaluating AND when values of both sides are known should immediately 
succeed
-// and return the same result regardless of order.
-private void testImmediate(Boolean expected, Boolean a, Boolean b) {
-testImmediateSingle(expected, a, b);
-testImmediateSingle(expected, b, a);
-}
-
-private PColumn pcolumn(final String name) {
-return new PBaseColumn() {
-@Override public PName getName() {
-return PNameFactory.newName(name);
-}
-
-@Override public PDataType getDataType() {
-return PBoolean.INSTANCE;
-}
-
-@Override public PName getFamilyName() {
-return 
PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY);
-}
-
-@Override public int getPosition() {
-return 0;
-}
-
-@Override public Integer getArraySize() {
-return null;
-}
-
-@Override public byte[] getViewConstant() {
-return new byte[0];
-}
-
-@Override public boolean isViewReferenced() {
-return false;
-}
-
-@Override public String getExpressionStr() {
-   

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since the author did not add a license header.

2019-09-25 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 82ee8d5  PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest 
since the author did not add a license header.
82ee8d5 is described below

commit 82ee8d5917da6a56ea124f281dbbecf9fed4571d
Author: Lars Hofhansl 
AuthorDate: Wed Sep 25 10:32:29 2019 -0700

PHOENIX-5463 Remove AndExpressionTest and OrExpressionTest since the author 
did not add a license header.
---
 .../phoenix/expression/AndExpressionTest.java  | 314 -
 .../phoenix/expression/OrExpressionTest.java   | 310 
 2 files changed, 624 deletions(-)

diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
deleted file mode 100644
index a223a19..000
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.expression;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.PBaseColumn;
-import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.PName;
-import org.apache.phoenix.schema.PNameFactory;
-import org.apache.phoenix.schema.SortOrder;
-import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
-import org.apache.phoenix.schema.types.PBoolean;
-import org.apache.phoenix.schema.types.PDataType;
-import org.junit.Test;
-
-import java.util.Arrays;
-import java.util.Collections;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-public class AndExpressionTest {
-
-private AndExpression createAnd(Expression lhs, Expression rhs) {
-return new AndExpression(Arrays.asList(lhs, rhs));
-}
-
-private AndExpression createAnd(Boolean x, Boolean y) {
-return createAnd(LiteralExpression.newConstant(x), 
LiteralExpression.newConstant(y));
-}
-
-private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean 
rhs) {
-AndExpression and = createAnd(lhs, rhs);
-ImmutableBytesWritable out = new ImmutableBytesWritable();
-MultiKeyValueTuple tuple = new MultiKeyValueTuple();
-boolean success = and.evaluate(tuple, out);
-assertTrue(success);
-assertEquals(expected, PBoolean.INSTANCE.toObject(out));
-}
-
-// Evaluating AND when values of both sides are known should immediately 
succeed
-// and return the same result regardless of order.
-private void testImmediate(Boolean expected, Boolean a, Boolean b) {
-testImmediateSingle(expected, a, b);
-testImmediateSingle(expected, b, a);
-}
-
-private PColumn pcolumn(final String name) {
-return new PBaseColumn() {
-@Override public PName getName() {
-return PNameFactory.newName(name);
-}
-
-@Override public PDataType getDataType() {
-return PBoolean.INSTANCE;
-}
-
-@Override public PName getFamilyName() {
-return 
PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY);
-}
-
-@Override public int getPosition() {
-return 0;
-}
-
-@Override public Integer getArraySize() {
-return null;
-}
-
-@Override public byte[] getViewConstant() {
-return new byte[0];
-}
-
-@Override public boolean isViewReferenced() {
-return false;
-}
-
-@Override public String getExpressionStr() {
-   

[phoenix] branch master updated: PHOENIX-5486 Projections from local indexes return garbage.

2019-09-20 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 4e4113e  PHOENIX-5486 Projections from local indexes return garbage.
4e4113e is described below

commit 4e4113e2be4f6a57600678173ea649e97a342a79
Author: Lars Hofhansl 
AuthorDate: Fri Sep 20 12:54:52 2019 -0700

PHOENIX-5486 Projections from local indexes return garbage.
---
 .../src/main/java/org/apache/phoenix/iterate/ExplainTable.java| 2 +-
 .../src/main/java/org/apache/phoenix/schema/MetaDataClient.java   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
index 2671044..e53b084 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
@@ -215,7 +215,7 @@ public abstract class ExplainTable {
 private Long getViewIndexValue(PDataType type, byte[] range) {
 boolean useLongViewIndex = 
MetaDataUtil.getViewIndexIdDataType().equals(type);
 Object s = type.toObject(range);
-return (useLongViewIndex ? (Long) s : (Short) s) - (useLongViewIndex ? 
Long.MAX_VALUE : Short.MAX_VALUE);
+return (useLongViewIndex ? (Long) s : (Short) s) + (useLongViewIndex ? 
Long.MAX_VALUE : Short.MAX_VALUE) + 2;
 }
 
 private static class RowKeyValueIterator implements Iterator {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 8a794ac..b28d404 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1649,7 +1649,7 @@ public class MetaDataClient {
 PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns);
 tableProps.put(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME, 
dataTable.getName().getString());
 CreateTableStatement tableStatement = 
FACTORY.createTable(indexTableName, statement.getProps(), columnDefs, pk, 
statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null, 
null, statement.getBindCount(), null);
-table = createTableInternal(tableStatement, splits, dataTable, 
null, null, MetaDataUtil.getViewIndexIdDataType(),null, null, allocateIndexId, 
statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps);
+table = createTableInternal(tableStatement, splits, dataTable, 
null, null, getViewIndexDataType() ,null, null, allocateIndexId, 
statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps);
 }
 finally {
 deleteMutexCells(physicalSchemaName, physicalTableName, 
acquiredColumnMutexSet);
@@ -2825,7 +2825,7 @@ public class MetaDataClient {
 } else {
 tableUpsert.setBoolean(28, useStatsForParallelizationProp);
 }
-tableUpsert.setInt(29, Types.BIGINT);
+tableUpsert.setInt(29, viewIndexIdType.getSqlType());
 tableUpsert.execute();
 
 if (asyncCreatedDate != null) {



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5486 Projections from local indexes return garbage.

2019-09-20 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 8404666  PHOENIX-5486 Projections from local indexes return garbage.
8404666 is described below

commit 84046661905723107bd27b91d55317249cf0e703
Author: Lars Hofhansl 
AuthorDate: Fri Sep 20 12:54:52 2019 -0700

PHOENIX-5486 Projections from local indexes return garbage.
---
 .../src/main/java/org/apache/phoenix/iterate/ExplainTable.java| 2 +-
 .../src/main/java/org/apache/phoenix/schema/MetaDataClient.java   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
index 2671044..e53b084 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
@@ -215,7 +215,7 @@ public abstract class ExplainTable {
 private Long getViewIndexValue(PDataType type, byte[] range) {
 boolean useLongViewIndex = 
MetaDataUtil.getViewIndexIdDataType().equals(type);
 Object s = type.toObject(range);
-return (useLongViewIndex ? (Long) s : (Short) s) - (useLongViewIndex ? 
Long.MAX_VALUE : Short.MAX_VALUE);
+return (useLongViewIndex ? (Long) s : (Short) s) + (useLongViewIndex ? 
Long.MAX_VALUE : Short.MAX_VALUE) + 2;
 }
 
 private static class RowKeyValueIterator implements Iterator {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 3bc7d53..5ae53bb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1648,7 +1648,7 @@ public class MetaDataClient {
 PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns);
 tableProps.put(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME, 
dataTable.getName().getString());
 CreateTableStatement tableStatement = 
FACTORY.createTable(indexTableName, statement.getProps(), columnDefs, pk, 
statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null, 
null, statement.getBindCount(), null);
-table = createTableInternal(tableStatement, splits, dataTable, 
null, null, MetaDataUtil.getViewIndexIdDataType(),null, null, allocateIndexId, 
statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps);
+table = createTableInternal(tableStatement, splits, dataTable, 
null, null, getViewIndexDataType() ,null, null, allocateIndexId, 
statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps);
 }
 finally {
 deleteMutexCells(physicalSchemaName, physicalTableName, 
acquiredColumnMutexSet);
@@ -2824,7 +2824,7 @@ public class MetaDataClient {
 } else {
 tableUpsert.setBoolean(28, useStatsForParallelizationProp);
 }
-tableUpsert.setInt(29, Types.BIGINT);
+tableUpsert.setInt(29, viewIndexIdType.getSqlType());
 tableUpsert.execute();
 
 if (asyncCreatedDate != null) {



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5486 Projections from local indexes return garbage.

2019-09-20 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 4b9fee2  PHOENIX-5486 Projections from local indexes return garbage.
4b9fee2 is described below

commit 4b9fee2dcf3d99e056c272d3590117fcb9cbb3c4
Author: Lars Hofhansl 
AuthorDate: Fri Sep 20 12:54:52 2019 -0700

PHOENIX-5486 Projections from local indexes return garbage.
---
 .../src/main/java/org/apache/phoenix/iterate/ExplainTable.java| 2 +-
 .../src/main/java/org/apache/phoenix/schema/MetaDataClient.java   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
index 2671044..e53b084 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
@@ -215,7 +215,7 @@ public abstract class ExplainTable {
 private Long getViewIndexValue(PDataType type, byte[] range) {
 boolean useLongViewIndex = 
MetaDataUtil.getViewIndexIdDataType().equals(type);
 Object s = type.toObject(range);
-return (useLongViewIndex ? (Long) s : (Short) s) - (useLongViewIndex ? 
Long.MAX_VALUE : Short.MAX_VALUE);
+return (useLongViewIndex ? (Long) s : (Short) s) + (useLongViewIndex ? 
Long.MAX_VALUE : Short.MAX_VALUE) + 2;
 }
 
 private static class RowKeyValueIterator implements Iterator {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 70e68e3..04fdfec 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1662,7 +1662,7 @@ public class MetaDataClient {
 PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns);
 tableProps.put(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME, 
dataTable.getName().getString());
 CreateTableStatement tableStatement = 
FACTORY.createTable(indexTableName, statement.getProps(), columnDefs, pk, 
statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null, 
null, statement.getBindCount(), null);
-table = createTableInternal(tableStatement, splits, dataTable, 
null, null, MetaDataUtil.getViewIndexIdDataType(),null, null, allocateIndexId, 
statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps);
+table = createTableInternal(tableStatement, splits, dataTable, 
null, null, getViewIndexDataType() ,null, null, allocateIndexId, 
statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps);
 }
 finally {
 deleteMutexCells(physicalSchemaName, physicalTableName, 
acquiredColumnMutexSet);
@@ -2838,7 +2838,7 @@ public class MetaDataClient {
 } else {
 tableUpsert.setBoolean(28, useStatsForParallelizationProp);
 }
-tableUpsert.setInt(29, Types.BIGINT);
+tableUpsert.setInt(29, viewIndexIdType.getSqlType());
 tableUpsert.execute();
 
 if (asyncCreatedDate != null) {



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5486 Projections from local indexes return garbage.

2019-09-20 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 9fa0ee1  PHOENIX-5486 Projections from local indexes return garbage.
9fa0ee1 is described below

commit 9fa0ee1c56175641d37a886fa2b309bd7d4b0d80
Author: Lars Hofhansl 
AuthorDate: Fri Sep 20 12:54:52 2019 -0700

PHOENIX-5486 Projections from local indexes return garbage.
---
 .../src/main/java/org/apache/phoenix/iterate/ExplainTable.java| 2 +-
 .../src/main/java/org/apache/phoenix/schema/MetaDataClient.java   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
index 2671044..e53b084 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
@@ -215,7 +215,7 @@ public abstract class ExplainTable {
 private Long getViewIndexValue(PDataType type, byte[] range) {
 boolean useLongViewIndex = 
MetaDataUtil.getViewIndexIdDataType().equals(type);
 Object s = type.toObject(range);
-return (useLongViewIndex ? (Long) s : (Short) s) - (useLongViewIndex ? 
Long.MAX_VALUE : Short.MAX_VALUE);
+return (useLongViewIndex ? (Long) s : (Short) s) + (useLongViewIndex ? 
Long.MAX_VALUE : Short.MAX_VALUE) + 2;
 }
 
 private static class RowKeyValueIterator implements Iterator {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 3bc7d53..5ae53bb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1648,7 +1648,7 @@ public class MetaDataClient {
 PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns);
 tableProps.put(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME, 
dataTable.getName().getString());
 CreateTableStatement tableStatement = 
FACTORY.createTable(indexTableName, statement.getProps(), columnDefs, pk, 
statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null, 
null, statement.getBindCount(), null);
-table = createTableInternal(tableStatement, splits, dataTable, 
null, null, MetaDataUtil.getViewIndexIdDataType(),null, null, allocateIndexId, 
statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps);
+table = createTableInternal(tableStatement, splits, dataTable, 
null, null, getViewIndexDataType() ,null, null, allocateIndexId, 
statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps);
 }
 finally {
 deleteMutexCells(physicalSchemaName, physicalTableName, 
acquiredColumnMutexSet);
@@ -2824,7 +2824,7 @@ public class MetaDataClient {
 } else {
 tableUpsert.setBoolean(28, useStatsForParallelizationProp);
 }
-tableUpsert.setInt(29, Types.BIGINT);
+tableUpsert.setInt(29, viewIndexIdType.getSqlType());
 tableUpsert.execute();
 
 if (asyncCreatedDate != null) {



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5406 addendum: Add missing license header.

2019-09-18 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new fda8583  PHOENIX-5406 addendum: Add missing license header.
fda8583 is described below

commit fda858375e88a21474f0c2cd9bb8692e5b448f47
Author: Lars Hofhansl 
AuthorDate: Wed Sep 18 09:51:15 2019 -0700

PHOENIX-5406 addendum: Add missing license header.
---
 .../org/apache/phoenix/index/IndexUpgradeToolTest.java  | 17 +
 1 file changed, 17 insertions(+)

diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java
index e985479..d158b0d 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.index;
 
 import static org.apache.phoenix.mapreduce.index.IndexUpgradeTool.ROLLBACK_OP;



[phoenix] branch master updated: PHOENIX-5406 addendum: Add missing license header.

2019-09-18 Thread larsh
This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new c43a3e3  PHOENIX-5406 addendum: Add missing license header.
c43a3e3 is described below

commit c43a3e30246127bfd5139b9e556906d6be7a8c0f
Author: Lars Hofhansl 
AuthorDate: Wed Sep 18 09:51:15 2019 -0700

PHOENIX-5406 addendum: Add missing license header.
---
 .../org/apache/phoenix/index/IndexUpgradeToolTest.java  | 17 +
 1 file changed, 17 insertions(+)

diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java
index e985479..d158b0d 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.index;
 
 import static org.apache.phoenix.mapreduce.index.IndexUpgradeTool.ROLLBACK_OP;



  1   2   3   4   5   6   7   >