[10/32] phoenix git commit: PHOENIX-4790 Simplify check for client side delete

2018-10-17 Thread pboado
PHOENIX-4790 Simplify check for client side delete


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/48b5fe61
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/48b5fe61
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/48b5fe61

Branch: refs/heads/4.14-cdh5.14
Commit: 48b5fe616baa7850ffef356a870509da90d1500f
Parents: 8730fa9
Author: James Taylor 
Authored: Tue Jun 19 15:33:21 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:42:05 2018 +0100

--
 .../apache/phoenix/compile/DeleteCompiler.java  | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/48b5fe61/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 5f9c76c..78b2db9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -46,7 +46,6 @@ import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
 import org.apache.phoenix.execute.MutationState.RowMutationState;
-import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -481,6 +480,7 @@ public class DeleteCompiler {
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, 
'"' + column.getName().getString() + '"', null)));
 }
+boolean noQueryReqd = true;
 // Project all non PK indexed columns so that we can do the proper 
index maintenance
 for (PTable index : table.getIndexes()) {
 IndexMaintainer maintainer = index.getIndexMaintainer(table, 
connection);
@@ -492,6 +492,8 @@ public class DeleteCompiler {
 boolean hasNoColumnFamilies = 
table.getColumnFamilies().isEmpty();
 PColumn column = hasNoColumnFamilies ? 
table.getColumnForColumnName(columnName) : 
table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
 if(!projectedColumns.contains(column)) {
+// We must run a query if any index contains a non pk 
column
+noQueryReqd = false;
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, 
FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), 
'"' + columnName + '"', null)));
 }
@@ -511,7 +513,7 @@ public class DeleteCompiler {
 select = StatementNormalizer.normalize(transformedSelect, 
resolverToBe);
 }
 final boolean hasPreOrPostProcessing = hasPreProcessing || 
hasPostProcessing;
-boolean noQueryReqd = !hasPreOrPostProcessing;
+noQueryReqd &= !hasPreOrPostProcessing;
 // No limit and no sub queries, joins, etc in where clause
 // Can't run on same server for transactional data, as we need the row 
keys for the data
 // that is being upserted for conflict detection purposes.
@@ -550,24 +552,8 @@ public class DeleteCompiler {
 }
 
 runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != 
PTableType.INDEX;
-
-// We need to have all indexed columns available in all immutable 
indexes in order
-// to generate the delete markers from the query. We also cannot have 
any filters
-// except for our SkipScanFilter for point lookups.
-// A simple check of the non existence of a where clause in the parse 
node is not sufficient, as the where clause
-// may have been optimized out. Instead, we check that there's a 
single SkipScanFilter
-// If we can generate a plan for every index, that means all the 
required columns are available in every index,
-// hence we can drive the delete from any of the plans.
-noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
-int queryPlanIndex = 0;
-while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
-QueryPlan plan = queryPlans.get(queryPlanIndex++);
-StatementContext context = plan.getContext();
-noQueryReqd &= (!context.getScan().hasFilter()
-|| context.getScan().getFilter() instanceof SkipScanFilter)
- 

[10/32] phoenix git commit: PHOENIX-4790 Simplify check for client side delete

2018-10-17 Thread pboado
PHOENIX-4790 Simplify check for client side delete


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/eb609086
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/eb609086
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/eb609086

Branch: refs/heads/4.14-cdh5.13
Commit: eb60908668e973506f0e027a5da8e96d02ac7573
Parents: 340a4ea
Author: James Taylor 
Authored: Tue Jun 19 15:33:21 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:35:37 2018 +0100

--
 .../apache/phoenix/compile/DeleteCompiler.java  | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb609086/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 5f9c76c..78b2db9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -46,7 +46,6 @@ import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
 import org.apache.phoenix.execute.MutationState.RowMutationState;
-import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -481,6 +480,7 @@ public class DeleteCompiler {
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, 
'"' + column.getName().getString() + '"', null)));
 }
+boolean noQueryReqd = true;
 // Project all non PK indexed columns so that we can do the proper 
index maintenance
 for (PTable index : table.getIndexes()) {
 IndexMaintainer maintainer = index.getIndexMaintainer(table, 
connection);
@@ -492,6 +492,8 @@ public class DeleteCompiler {
 boolean hasNoColumnFamilies = 
table.getColumnFamilies().isEmpty();
 PColumn column = hasNoColumnFamilies ? 
table.getColumnForColumnName(columnName) : 
table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
 if(!projectedColumns.contains(column)) {
+// We must run a query if any index contains a non pk 
column
+noQueryReqd = false;
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, 
FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), 
'"' + columnName + '"', null)));
 }
@@ -511,7 +513,7 @@ public class DeleteCompiler {
 select = StatementNormalizer.normalize(transformedSelect, 
resolverToBe);
 }
 final boolean hasPreOrPostProcessing = hasPreProcessing || 
hasPostProcessing;
-boolean noQueryReqd = !hasPreOrPostProcessing;
+noQueryReqd &= !hasPreOrPostProcessing;
 // No limit and no sub queries, joins, etc in where clause
 // Can't run on same server for transactional data, as we need the row 
keys for the data
 // that is being upserted for conflict detection purposes.
@@ -550,24 +552,8 @@ public class DeleteCompiler {
 }
 
 runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != 
PTableType.INDEX;
-
-// We need to have all indexed columns available in all immutable 
indexes in order
-// to generate the delete markers from the query. We also cannot have 
any filters
-// except for our SkipScanFilter for point lookups.
-// A simple check of the non existence of a where clause in the parse 
node is not sufficient, as the where clause
-// may have been optimized out. Instead, we check that there's a 
single SkipScanFilter
-// If we can generate a plan for every index, that means all the 
required columns are available in every index,
-// hence we can drive the delete from any of the plans.
-noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
-int queryPlanIndex = 0;
-while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
-QueryPlan plan = queryPlans.get(queryPlanIndex++);
-StatementContext context = plan.getContext();
-noQueryReqd &= (!context.getScan().hasFilter()
-|| context.getScan().getFilter() instanceof SkipScanFilter)
- 

[10/32] phoenix git commit: PHOENIX-4790 Simplify check for client side delete

2018-10-17 Thread pboado
PHOENIX-4790 Simplify check for client side delete


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d06fdf4d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d06fdf4d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d06fdf4d

Branch: refs/heads/4.14-cdh5.12
Commit: d06fdf4da5e4177e12825db27ad087c917b093b5
Parents: 2e448d8
Author: James Taylor 
Authored: Tue Jun 19 15:33:21 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:28:29 2018 +0100

--
 .../apache/phoenix/compile/DeleteCompiler.java  | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d06fdf4d/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 5f9c76c..78b2db9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -46,7 +46,6 @@ import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
 import org.apache.phoenix.execute.MutationState.RowMutationState;
-import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -481,6 +480,7 @@ public class DeleteCompiler {
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, 
'"' + column.getName().getString() + '"', null)));
 }
+boolean noQueryReqd = true;
 // Project all non PK indexed columns so that we can do the proper 
index maintenance
 for (PTable index : table.getIndexes()) {
 IndexMaintainer maintainer = index.getIndexMaintainer(table, 
connection);
@@ -492,6 +492,8 @@ public class DeleteCompiler {
 boolean hasNoColumnFamilies = 
table.getColumnFamilies().isEmpty();
 PColumn column = hasNoColumnFamilies ? 
table.getColumnForColumnName(columnName) : 
table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
 if(!projectedColumns.contains(column)) {
+// We must run a query if any index contains a non pk 
column
+noQueryReqd = false;
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, 
FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), 
'"' + columnName + '"', null)));
 }
@@ -511,7 +513,7 @@ public class DeleteCompiler {
 select = StatementNormalizer.normalize(transformedSelect, 
resolverToBe);
 }
 final boolean hasPreOrPostProcessing = hasPreProcessing || 
hasPostProcessing;
-boolean noQueryReqd = !hasPreOrPostProcessing;
+noQueryReqd &= !hasPreOrPostProcessing;
 // No limit and no sub queries, joins, etc in where clause
 // Can't run on same server for transactional data, as we need the row 
keys for the data
 // that is being upserted for conflict detection purposes.
@@ -550,24 +552,8 @@ public class DeleteCompiler {
 }
 
 runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != 
PTableType.INDEX;
-
-// We need to have all indexed columns available in all immutable 
indexes in order
-// to generate the delete markers from the query. We also cannot have 
any filters
-// except for our SkipScanFilter for point lookups.
-// A simple check of the non existence of a where clause in the parse 
node is not sufficient, as the where clause
-// may have been optimized out. Instead, we check that there's a 
single SkipScanFilter
-// If we can generate a plan for every index, that means all the 
required columns are available in every index,
-// hence we can drive the delete from any of the plans.
-noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
-int queryPlanIndex = 0;
-while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
-QueryPlan plan = queryPlans.get(queryPlanIndex++);
-StatementContext context = plan.getContext();
-noQueryReqd &= (!context.getScan().hasFilter()
-|| context.getScan().getFilter() instanceof SkipScanFilter)
- 

[10/32] phoenix git commit: PHOENIX-4790 Simplify check for client side delete

2018-10-17 Thread pboado
PHOENIX-4790 Simplify check for client side delete


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/03a3fb7f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/03a3fb7f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/03a3fb7f

Branch: refs/heads/4.14-cdh5.11
Commit: 03a3fb7f27c4c69050a68fd3d75f3353f6d69f8f
Parents: af49e30
Author: James Taylor 
Authored: Tue Jun 19 15:33:21 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:09:52 2018 +0100

--
 .../apache/phoenix/compile/DeleteCompiler.java  | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a3fb7f/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 5f9c76c..78b2db9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -46,7 +46,6 @@ import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
 import org.apache.phoenix.execute.MutationState.RowMutationState;
-import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -481,6 +480,7 @@ public class DeleteCompiler {
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, 
'"' + column.getName().getString() + '"', null)));
 }
+boolean noQueryReqd = true;
 // Project all non PK indexed columns so that we can do the proper 
index maintenance
 for (PTable index : table.getIndexes()) {
 IndexMaintainer maintainer = index.getIndexMaintainer(table, 
connection);
@@ -492,6 +492,8 @@ public class DeleteCompiler {
 boolean hasNoColumnFamilies = 
table.getColumnFamilies().isEmpty();
 PColumn column = hasNoColumnFamilies ? 
table.getColumnForColumnName(columnName) : 
table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
 if(!projectedColumns.contains(column)) {
+// We must run a query if any index contains a non pk 
column
+noQueryReqd = false;
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, 
FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), 
'"' + columnName + '"', null)));
 }
@@ -511,7 +513,7 @@ public class DeleteCompiler {
 select = StatementNormalizer.normalize(transformedSelect, 
resolverToBe);
 }
 final boolean hasPreOrPostProcessing = hasPreProcessing || 
hasPostProcessing;
-boolean noQueryReqd = !hasPreOrPostProcessing;
+noQueryReqd &= !hasPreOrPostProcessing;
 // No limit and no sub queries, joins, etc in where clause
 // Can't run on same server for transactional data, as we need the row 
keys for the data
 // that is being upserted for conflict detection purposes.
@@ -550,24 +552,8 @@ public class DeleteCompiler {
 }
 
 runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != 
PTableType.INDEX;
-
-// We need to have all indexed columns available in all immutable 
indexes in order
-// to generate the delete markers from the query. We also cannot have 
any filters
-// except for our SkipScanFilter for point lookups.
-// A simple check of the non existence of a where clause in the parse 
node is not sufficient, as the where clause
-// may have been optimized out. Instead, we check that there's a 
single SkipScanFilter
-// If we can generate a plan for every index, that means all the 
required columns are available in every index,
-// hence we can drive the delete from any of the plans.
-noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
-int queryPlanIndex = 0;
-while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
-QueryPlan plan = queryPlans.get(queryPlanIndex++);
-StatementContext context = plan.getContext();
-noQueryReqd &= (!context.getScan().hasFilter()
-|| context.getScan().getFilter() instanceof SkipScanFilter)
-