Repository: phoenix
Updated Branches:
  refs/heads/master e7bcfe4f2 -> cd8e86ca7


PHOENIX-2746 Delete on a table with immutable rows may fail with INVALID_FILTER_ON_IMMUTABLE_ROWS error code. (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cd8e86ca
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cd8e86ca
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cd8e86ca

Branch: refs/heads/master
Commit: cd8e86ca7170876a30771fcc16c027f8dc8dd386
Parents: e7bcfe4
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Fri Mar 18 01:28:53 2016 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Fri Mar 18 01:28:53 2016 +0530

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/DeleteIT.java    | 12 +++++++++
 .../apache/phoenix/compile/DeleteCompiler.java  | 28 +++++++++++---------
 .../apache/phoenix/optimize/QueryOptimizer.java | 14 ++++++----
 .../org/apache/phoenix/schema/PTableImpl.java   |  2 +-
 4 files changed, 37 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
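
For context, the failure described in the commit message shows up when a DELETE on an
IMMUTABLE_ROWS=true table filters on an indexed column, yet compilation could still be
rejected with INVALID_FILTER_ON_IMMUTABLE_ROWS. A minimal JDBC sketch of that scenario,
modeled on the DeleteIT test added below; the connection URL and column types are
assumptions, while table, index, and column names follow the test:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class ImmutableDeleteSketch {
        public static void main(String[] args) throws Exception {
            // Assumes a local Phoenix/HBase instance; the URL is an assumption.
            try (Connection con = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stm = con.createStatement()) {
                // Immutable-rows table; column types here are illustrative guesses.
                stm.execute("CREATE TABLE web_stats (HOST CHAR(2) NOT NULL, DOMAIN VARCHAR NOT NULL, " +
                        "FEATURE VARCHAR NOT NULL, DATE DATE NOT NULL, USAGE.DB BIGINT, " +
                        "STATS.ACTIVE_VISITOR INTEGER " +
                        "CONSTRAINT PK PRIMARY KEY (HOST, DOMAIN, FEATURE, DATE)) IMMUTABLE_ROWS=true");
                // Index whose indexed column appears in the DELETE filter.
                stm.execute("CREATE INDEX web_stats_idx ON web_stats (USAGE.DB)");
                // Before this fix, a DELETE like this could fail to compile with
                // INVALID_FILTER_ON_IMMUTABLE_ROWS (see the DeleteCompiler and
                // QueryOptimizer changes below).
                con.prepareStatement("DELETE FROM web_stats WHERE USAGE.DB = 2").executeUpdate();
                con.commit();
            }
        }
    }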


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd8e86ca/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
index 6b4eead..10152e3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.Date;
@@ -370,6 +371,7 @@ public class DeleteIT extends BaseHBaseManagedTimeIT {
                     "STATS.ACTIVE_VISITOR INTEGER " +
                     "CONSTRAINT PK PRIMARY KEY (HOST, DOMAIN, FEATURE, DATE)) 
IMMUTABLE_ROWS=true");
             stm.execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX 
web_stats_idx ON web_stats (DATE, FEATURE)");
+            stm.execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX 
web_stats_idx2 ON web_stats (DATE, FEATURE, USAGE.DB)");
             stm.close();
 
             Date date = new Date(0);
@@ -406,6 +408,16 @@ public class DeleteIT extends BaseHBaseManagedTimeIT {
             assertTrue(rs.next());
             assertEquals(0, rs.getLong(1));
 
+            stm.execute("DROP INDEX web_stats_idx ON web_stats");
+            stm.execute("DROP INDEX web_stats_idx2 ON web_stats");
+
+            stm.execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX 
web_stats_idx ON web_stats (USAGE.DB)");
+            stm.execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX 
web_stats_idx2 ON web_stats (USAGE.DB, DATE)");
+            try{
+                psInsert = con.prepareStatement("DELETE FROM web_stats WHERE USAGE.DB=2");
+            } catch(Exception e) {
+                fail("There should not be any exception while deleting row");
+            }
         } finally {
             try {
                 con.close();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd8e86ca/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 8e9e1de..24a2add 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -22,6 +22,7 @@ import java.sql.ParameterMetaData;
 import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -233,18 +234,18 @@ public class DeleteCompiler {
         
     }
     
-    private Set<PTable> getNonDisabledImmutableIndexes(TableRef tableRef) {
+    private Map<PTableKey, PTable> getNonDisabledImmutableIndexes(TableRef tableRef) {
         PTable table = tableRef.getTable();
         if (table.isImmutableRows() && !table.getIndexes().isEmpty()) {
-            Set<PTable> nonDisabledIndexes = Sets.newHashSetWithExpectedSize(table.getIndexes().size());
+            Map<PTableKey, PTable> nonDisabledIndexes = new HashMap<PTableKey, PTable>(table.getIndexes().size());
             for (PTable index : table.getIndexes()) {
                 if (index.getIndexState() != PIndexState.DISABLE) {
-                    nonDisabledIndexes.add(index);
+                    nonDisabledIndexes.put(index.getKey(), index);
                 }
             }
             return nonDisabledIndexes;
         }
-        return Collections.emptySet();
+        return Collections.emptyMap();
     }
     
     private class MultiDeleteMutationPlan implements MutationPlan {
@@ -312,8 +313,8 @@ public class DeleteCompiler {
         boolean runOnServer = false;
         SelectStatement select = null;
         ColumnResolver resolverToBe = null;
-        Set<PTable> immutableIndex = Collections.emptySet();
-        DeletingParallelIteratorFactory parallelIteratorFactory = null;
+        Map<PTableKey, PTable> immutableIndex = Collections.emptyMap();
+        DeletingParallelIteratorFactory parallelIteratorFactory;
         QueryPlan dataPlanToBe = null;
         while (true) {
             try {
@@ -406,7 +407,7 @@ public class DeleteCompiler {
                 PTable table = plan.getTableRef().getTable();
                 if (table.getType() == PTableType.INDEX) { // index plans
                     tableRefs[i++] = plan.getTableRef();
-                    immutableIndex.remove(table);
+                    immutableIndex.remove(table.getKey());
                 } else { // data plan
                     /*
                      * If we have immutable indexes that we need to maintain, don't execute the data plan
@@ -586,12 +587,7 @@ public class DeleteCompiler {
                 });
             } else {
                 final boolean deleteFromImmutableIndexToo = hasImmutableIndexes && !plan.getTableRef().equals(tableRef);
-                if (parallelIteratorFactory != null) {
-                    parallelIteratorFactory.setRowProjector(plan.getProjector());
-                    parallelIteratorFactory.setTargetTableRef(tableRef);
-                    parallelIteratorFactory.setSourceTableRef(plan.getTableRef());
-                    parallelIteratorFactory.setIndexTargetTableRef(deleteFromImmutableIndexToo ? plan.getTableRef() : null);
-                }
+                final DeletingParallelIteratorFactory parallelIteratorFactory2 = parallelIteratorFactory;
                 mutationPlans.add( new MutationPlan() {
                     @Override
                     public ParameterMetaData getParameterMetaData() {
@@ -625,6 +621,12 @@ public class DeleteCompiler {
                             if (!hasLimit) {
                                 Tuple tuple;
                                 long totalRowCount = 0;
+                                if (parallelIteratorFactory2 != null) {
+                                    parallelIteratorFactory2.setRowProjector(plan.getProjector());
+                                    parallelIteratorFactory2.setTargetTableRef(tableRef);
+                                    parallelIteratorFactory2.setSourceTableRef(plan.getTableRef());
+                                    parallelIteratorFactory2.setIndexTargetTableRef(deleteFromImmutableIndexToo ? plan.getTableRef() : null);
+                                }
                                 while ((tuple=iterator.next()) != null) {// Runs query
                                     Cell kv = tuple.getValue(0);
                                     totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
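
The heart of the DeleteCompiler change above is bookkeeping: the remaining immutable
indexes are now tracked in a Map keyed by PTableKey instead of a HashSet of PTable
objects, so an index that shows up as a query plan can be removed reliably by key
(PTableImpl does not override equals/hashCode, as the TODO added in PTableImpl.java
below notes). A stripped-down sketch of that pattern with hypothetical stand-in
classes, not the real Phoenix types:

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical stand-ins for PTableKey/PTable; the real classes live in
    // org.apache.phoenix.schema and are not reproduced here.
    final class TableKey {
        private final String name;
        TableKey(String name) { this.name = name; }
        @Override public boolean equals(Object o) {
            return o instanceof TableKey && ((TableKey) o).name.equals(name);
        }
        @Override public int hashCode() { return name.hashCode(); }
    }

    final class Table {
        private final TableKey key;
        Table(String name) { this.key = new TableKey(name); }
        TableKey getKey() { return key; }
        // Note: no equals/hashCode override, mirroring PTableImpl.
    }

    public class KeyedIndexTracking {
        public static void main(String[] args) {
            Map<TableKey, Table> remaining = new HashMap<TableKey, Table>();
            Table idx = new Table("WEB_STATS_IDX");
            remaining.put(idx.getKey(), idx);

            // A second object representing the same index (e.g. re-resolved from
            // metadata) still clears the entry, because removal goes by key.
            Table sameIndexResolvedAgain = new Table("WEB_STATS_IDX");
            remaining.remove(sameIndexResolvedAgain.getKey());

            System.out.println(remaining.isEmpty()); // true
        }
    }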

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd8e86ca/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index adc3c2d..40591c2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -153,7 +153,7 @@ public class QueryOptimizer {
             }
         }
         
-        return hintedPlan == null ? orderPlansBestToWorst(select, plans) : plans;
+        return hintedPlan == null ? orderPlansBestToWorst(select, plans, stopAtBestPlan) : plans;
     }
     
     private static QueryPlan getHintedQueryPlan(PhoenixStatement statement, SelectStatement select, List<PTable> indexes, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, List<QueryPlan> plans) throws SQLException {
@@ -319,7 +319,7 @@ public class QueryOptimizer {
      * @param plans the list of candidate plans
      * @return list of plans ordered from best to worst.
      */
-    private List<QueryPlan> orderPlansBestToWorst(SelectStatement select, List<QueryPlan> plans) {
+    private List<QueryPlan> orderPlansBestToWorst(SelectStatement select, List<QueryPlan> plans, boolean stopAtBestPlan) {
         final QueryPlan dataPlan = plans.get(0);
         if (plans.size() == 1) {
             return plans;
@@ -330,10 +330,14 @@ public class QueryOptimizer {
          * keys), then favor those first.
          */
         List<QueryPlan> candidates = Lists.newArrayListWithExpectedSize(plans.size());
-        for (QueryPlan plan : plans) {
-            if (plan.getContext().getScanRanges().isPointLookup()) {
-                candidates.add(plan);
+        if (stopAtBestPlan) { // If we're stopping at the best plan, only consider point lookups if there are any
+            for (QueryPlan plan : plans) {
+                if (plan.getContext().getScanRanges().isPointLookup()) {
+                    candidates.add(plan);
+                }
             }
+        } else {
+            candidates.addAll(plans);
         }
         /**
          * If we have a plan(s) that removes the order by, choose from among these,
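
The QueryOptimizer change threads the stopAtBestPlan flag into orderPlansBestToWorst:
only when the caller wants a single best plan does the optimizer narrow the candidates
to point lookups; otherwise every applicable plan stays in the list, so DeleteCompiler
can still see plans for the immutable indexes it has to maintain. A simplified,
self-contained sketch of just that selection step; Plan and its pointLookup flag are
stand-ins for the real QueryPlan/ScanRanges API:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class CandidateSelectionSketch {
        // Hypothetical stand-in for QueryPlan; only the point-lookup property matters here.
        static final class Plan {
            final String name;
            final boolean pointLookup;
            Plan(String name, boolean pointLookup) { this.name = name; this.pointLookup = pointLookup; }
            @Override public String toString() { return name; }
        }

        static List<Plan> selectCandidates(List<Plan> plans, boolean stopAtBestPlan) {
            List<Plan> candidates = new ArrayList<Plan>(plans.size());
            if (stopAtBestPlan) {
                // Searching for a single best plan: favor point lookups if any exist.
                for (Plan plan : plans) {
                    if (plan.pointLookup) {
                        candidates.add(plan);
                    }
                }
            } else {
                // Caller wants every applicable plan (e.g. a DELETE that must also
                // maintain immutable indexes), so nothing is filtered out here.
                candidates.addAll(plans);
            }
            return candidates;
        }

        public static void main(String[] args) {
            List<Plan> plans = Arrays.asList(new Plan("data", false), new Plan("idx", false));
            System.out.println(selectCandidates(plans, true));  // [] here, since neither plan is a point lookup
            System.out.println(selectCandidates(plans, false)); // [data, idx]
        }
    }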

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd8e86ca/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index bb90c16..8165451 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -86,7 +86,7 @@ import com.sun.istack.NotNull;
  * storing data in a single column (ColumnLayout.SINGLE) or in
  * multiple columns (ColumnLayout.MULTI).
  *
- *
+ * TODO add hashCode and equal methods to check equality of two PTableImpl objects.
  * @since 0.1
  */
 public class PTableImpl implements PTable {
