This is an automated email from the ASF dual-hosted git repository.
korlov pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/ignite-3.git
The following commit(s) were added to refs/heads/main by this push:
new fc8c948806a IGNITE-26203 Sql. Partition pruning. No metadata for
INSERT INTO table SELECT FROM table (#7703)
fc8c948806a is described below
commit fc8c948806a862b177e9cf62bf1b65aa8329a2d4
Author: Max Zhuravkov <[email protected]>
AuthorDate: Thu Mar 26 13:37:02 2026 +0200
IGNITE-26203 Sql. Partition pruning. No metadata for INSERT INTO table
SELECT FROM table (#7703)
---
.../PartitionAwarenessMetadataExtractor.java | 204 ++++++++--
.../engine/prepare/pruning/ModifyNodeVisitor.java | 151 ++++++-
.../prepare/pruning/PartitionPrunerImpl.java | 2 +-
.../prepare/pruning/PartitionPruningColumns.java | 17 +
.../prepare/pruning/PartitionPruningMetadata.java | 10 +-
.../pruning/PartitionPruningMetadataExtractor.java | 53 ++-
.../planner/PartitionPruningMetadataTest.java | 11 +-
.../sql/engine/planner/PartitionPruningTest.java | 432 ++++++++++++++++++++-
.../PartitionAwarenessMetadataTest.java | 33 +-
.../resources/mapping/test_partition_pruning.test | 212 ++++++++++
10 files changed, 1016 insertions(+), 109 deletions(-)
diff --git
a/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/partitionawareness/PartitionAwarenessMetadataExtractor.java
b/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/partitionawareness/PartitionAwarenessMetadataExtractor.java
index 202dd5e85df..6b6e7aa2614 100644
---
a/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/partitionawareness/PartitionAwarenessMetadataExtractor.java
+++
b/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/partitionawareness/PartitionAwarenessMetadataExtractor.java
@@ -20,7 +20,9 @@ package
org.apache.ignite.internal.sql.engine.prepare.partitionawareness;
import it.unimi.dsi.fastutil.ints.Int2ObjectMap;
import it.unimi.dsi.fastutil.ints.IntArrayList;
import it.unimi.dsi.fastutil.longs.Long2ObjectMap;
+import it.unimi.dsi.fastutil.longs.LongSet;
import java.util.List;
+import java.util.Objects;
import org.apache.calcite.plan.RelOptTable;
import org.apache.calcite.rex.RexDynamicParam;
import org.apache.calcite.rex.RexLiteral;
@@ -28,14 +30,18 @@ import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexUtil;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.calcite.util.ImmutableIntList;
+import org.apache.ignite.internal.sql.engine.prepare.IgniteRelShuttle;
import org.apache.ignite.internal.sql.engine.prepare.RelWithSources;
import
org.apache.ignite.internal.sql.engine.prepare.pruning.PartitionPruningColumns;
import
org.apache.ignite.internal.sql.engine.prepare.pruning.PartitionPruningMetadata;
+import org.apache.ignite.internal.sql.engine.rel.IgniteIndexScan;
import org.apache.ignite.internal.sql.engine.rel.IgniteKeyValueGet;
import org.apache.ignite.internal.sql.engine.rel.IgniteKeyValueModify;
import
org.apache.ignite.internal.sql.engine.rel.IgniteKeyValueModify.Operation;
import org.apache.ignite.internal.sql.engine.rel.IgniteRel;
import org.apache.ignite.internal.sql.engine.rel.IgniteTableFunctionScan;
+import org.apache.ignite.internal.sql.engine.rel.IgniteTableModify;
+import org.apache.ignite.internal.sql.engine.rel.IgniteTableScan;
import org.apache.ignite.internal.sql.engine.schema.IgniteTable;
import org.apache.ignite.internal.sql.engine.sql.fun.IgniteSqlOperatorTable;
import org.apache.ignite.internal.sql.engine.type.IgniteTypeFactory;
@@ -91,7 +97,7 @@ public class PartitionAwarenessMetadataExtractor {
return getMetadata((IgniteKeyValueGet) rel);
} else if (rel instanceof IgniteKeyValueModify) {
return getMetadata((IgniteKeyValueModify) rel);
- } else if (partitionPruningMetadata != null) {
+ } else if (partitionPruningMetadata != null &&
!partitionPruningMetadata.data().isEmpty()) {
return tryConvertPartitionPruningMetadata(relationWithSources,
partitionPruningMetadata);
} else {
return null;
@@ -178,51 +184,20 @@ public class PartitionAwarenessMetadataExtractor {
RelWithSources relationWithSources,
PartitionPruningMetadata metadata
) {
- // Partition awareness metadata is created once per source table,
- // so we do not consider plan that have more then 1 source.
- if (metadata.data().size() != 1) {
- return null;
- }
-
- Long2ObjectMap.Entry<PartitionPruningColumns> entry = metadata.data()
- .long2ObjectEntrySet()
- .iterator().next();
-
- long sourceId = entry.getLongKey();
- IgniteRel sourceRel = relationWithSources.get(sourceId);
- assert sourceRel != null;
-
- RelOptTable optTable = sourceRel.getTable();
- assert optTable != null;
+ ExtractCompatibleMetadata collector = new
ExtractCompatibleMetadata(metadata);
+ CompatibleMetadata compatibleMetadata =
collector.go(relationWithSources);
- IgniteTable igniteTable = optTable.unwrap(IgniteTable.class);
- assert igniteTable != null;
-
- // Partition pruning (PP) metadata includes information to identify
all possible partitions.
- // However, partition awareness restricts execution to a single
partition,
- // so we should reject PP metadata that has more than one set of
columns.
- //
- // Ignore PP with correlated variables as well, because some queries
- // can access additional partitions.
- PartitionPruningColumns columns = entry.getValue();
- if (columns.columns().size() != 1 ||
columns.containCorrelatedVariables()) {
+ // If partition pruning metadata is not compatible with
partition-awareness, terminate.
+ if (compatibleMetadata == null) {
return null;
}
- boolean dml =
relationWithSources.modifiedTables().contains(igniteTable.id());
- long numSources = numberOfModifyAndSourceRels(relationWithSources);
-
- // Accept queries that have exactly one source rel.
- if (!dml && numSources != 1) {
- return null;
- }
-
- // Accept DMLs that have a ModifyNode and a single source rel.
- if (dml && numSources != 2) {
- return null;
- }
+ PartitionPruningColumns columns = compatibleMetadata.columns;
+ RelOptTable optTable = compatibleMetadata.relOptTable;
+ IgniteTable igniteTable = compatibleMetadata.table;
// Choose appropriate tx mode.
+ boolean dml =
relationWithSources.modifiedTables().contains(igniteTable.id());
DirectTxMode directTxMode = dml ? DirectTxMode.NOT_SUPPORTED :
DirectTxMode.SUPPORTED;
ImmutableIntList colocationKeys = igniteTable.distribution().getKeys();
@@ -299,4 +274,153 @@ public class PartitionAwarenessMetadataExtractor {
return ColocationUtils.hash(internalVal, nativeType);
}
+
+ private static class CompatibleMetadata {
+ final IgniteTable table;
+ final PartitionPruningColumns columns;
+ final RelOptTable relOptTable;
+
+ CompatibleMetadata(IgniteTable table, PartitionPruningColumns columns,
RelOptTable relOptTable) {
+ this.columns = columns;
+ this.relOptTable = relOptTable;
+ this.table = table;
+ }
+ }
+
+ private static class ExtractCompatibleMetadata extends IgniteRelShuttle {
+
+ private final PartitionPruningMetadata metadata;
+ private boolean sameTable;
+ private IgniteTable igniteTable;
+
+ private ExtractCompatibleMetadata(PartitionPruningMetadata metadata) {
+ this.metadata = metadata;
+ }
+
+ @Nullable CompatibleMetadata go(RelWithSources relationWithSources) {
+ PartitionPruningMetadata compatibleMetadata =
tryCollect(relationWithSources.root());
+ if (compatibleMetadata == null) {
+ return null;
+ }
+
+ Long2ObjectMap.Entry<PartitionPruningColumns> entry =
compatibleMetadata.data()
+ .long2ObjectEntrySet()
+ .iterator().next();
+
+ long sourceId = entry.getLongKey();
+
+ IgniteRel sourceRel = relationWithSources.get(sourceId);
+ assert sourceRel != null;
+
+ RelOptTable optTable = sourceRel.getTable();
+ assert optTable != null;
+
+ IgniteTable igniteTable = optTable.unwrap(IgniteTable.class);
+ assert igniteTable != null;
+
+ // Partition pruning (PP) metadata includes information to
identify all possible partitions.
+ // However, partition awareness restricts execution to a single
partition,
+ // so we should reject PP metadata that has more than one set of
columns.
+ //
+ // Ignore PP with correlated variables as well, because some
queries
+ // can access additional partitions.
+ PartitionPruningColumns columns = entry.getValue();
+ if (columns.columns().size() != 1 ||
columns.containCorrelatedVariables()) {
+ return null;
+ }
+
+ boolean dml =
relationWithSources.modifiedTables().contains(igniteTable.id());
+ long numSources = numberOfModifyAndSourceRels(relationWithSources);
+
+ // Accept queries that have exactly one source rel.
+ if (!dml && numSources != 1) {
+ return null;
+ }
+
+ // Accept DMLs that have a ModifyNode and a single source rel.
+ if (dml && numSources != 2) {
+ return null;
+ }
+
+ return new CompatibleMetadata(igniteTable, columns, optTable);
+ }
+
+ private @Nullable PartitionPruningMetadata tryCollect(IgniteRel rel) {
+ // Partition pruning metadata is partition-awareness compatible
iff
+ // all sources refer to the same table and all of them have equal PP
columns.
+
+ // If there is only one source, the metadata can be checked for
compatibility.
+ if (metadata.data().size() == 1) {
+ return metadata;
+ }
+
+ // Check that all metadata is the same.
+ PartitionPruningColumns previous = null;
+ Long firstSourceId = null;
+ boolean allEqual = true;
+
+ for (Long2ObjectMap.Entry<PartitionPruningColumns> entry :
metadata.data().long2ObjectEntrySet()) {
+ long id = entry.getLongKey();
+ PartitionPruningColumns columns = entry.getValue();
+ if (previous == null) {
+ previous = columns;
+ firstSourceId = id;
+ } else if (!previous.equals(columns)) {
+ allEqual = false;
+ }
+ }
+
+ // If metadata is not equal, metadata is not compatible with
partition-awareness.
+ if (!allEqual) {
+ return null;
+ }
+
+ rel.accept(this);
+
+ // If there are multiple tables, metadata is not compatible with
partition-awareness.
+ if (!sameTable) {
+ return null;
+ }
+
+ // Return the first object, since all metadata objects have the
same structure.
+ assert firstSourceId != null;
+ return metadata.subset(LongSet.of(firstSourceId));
+ }
+
+ @Override
+ public IgniteRel visit(IgniteTableModify rel) {
+ checkTable(rel.getTable());
+
+ return super.visit(rel);
+ }
+
+ @Override
+ public IgniteRel visit(IgniteIndexScan rel) {
+ checkTable(rel.getTable());
+
+ return super.visit(rel);
+ }
+
+ @Override
+ public IgniteRel visit(IgniteTableScan rel) {
+ checkTable(rel.getTable());
+
+ return super.visit(rel);
+ }
+
+ private void checkTable(@Nullable RelOptTable table) {
+ if (table == null) {
+ return;
+ }
+ IgniteTable currentTable = table.unwrap(IgniteTable.class);
+ assert currentTable != null;
+
+ if (igniteTable == null) {
+ igniteTable = currentTable;
+ sameTable = true;
+ } else if (!Objects.equals(igniteTable.id(), currentTable.id())) {
+ sameTable = false;
+ }
+ }
+ }
}
diff --git
a/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/ModifyNodeVisitor.java
b/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/ModifyNodeVisitor.java
index b4ee7737c00..1c630515c68 100644
---
a/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/ModifyNodeVisitor.java
+++
b/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/ModifyNodeVisitor.java
@@ -20,12 +20,16 @@ package
org.apache.ignite.internal.sql.engine.prepare.pruning;
import static org.apache.ignite.internal.lang.IgniteStringFormatter.format;
import static org.apache.ignite.internal.util.CollectionUtils.nullOrEmpty;
+import it.unimi.dsi.fastutil.ints.Int2ObjectMap;
import java.util.ArrayList;
import java.util.List;
import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexLocalRef;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexShuttle;
import org.apache.calcite.rex.RexVisitor;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.ImmutableIntList;
import org.apache.ignite.internal.logger.IgniteLogger;
import org.apache.ignite.internal.logger.Loggers;
import
org.apache.ignite.internal.sql.engine.rel.IgniteCorrelatedNestedLoopJoin;
@@ -62,6 +66,7 @@ import
org.apache.ignite.internal.sql.engine.rel.agg.IgniteMapSortAggregate;
import org.apache.ignite.internal.sql.engine.rel.agg.IgniteReduceHashAggregate;
import org.apache.ignite.internal.sql.engine.rel.agg.IgniteReduceSortAggregate;
import org.apache.ignite.internal.sql.engine.rel.set.IgniteSetOp;
+import org.apache.ignite.internal.sql.engine.schema.IgniteTable;
import org.apache.ignite.internal.sql.engine.util.Commons;
import org.apache.ignite.internal.util.IgniteUtils;
import org.jetbrains.annotations.Nullable;
@@ -73,7 +78,17 @@ import org.jetbrains.annotations.Nullable;
class ModifyNodeVisitor implements IgniteRelVisitor<List<List<RexNode>>> {
private static final IgniteLogger LOG =
Loggers.forClass(ModifyNodeVisitor.class);
- private ModifyNodeVisitor() {
+ /**
+ * A placeholder used to designate that there is no value for a column. If
the PP metadata collection algorithm encounters this placeholder
+ * in a colocation column for a source, then there can be no PP metadata
for this source. Since this is a marker to terminate the
+ * algorithm, the type chosen for this expression does not matter.
+ */
+ static final RexNode VALUE_NOT_ASSIGNED = new RexInputRef(99999,
Commons.typeFactory().createSqlType(SqlTypeName.NULL));
+
+ private final PartitionPruningMetadataExtractor extractor;
+
+ private ModifyNodeVisitor(PartitionPruningMetadataExtractor extractor) {
+ this.extractor = extractor;
}
/**
@@ -82,14 +97,24 @@ class ModifyNodeVisitor implements
IgniteRelVisitor<List<List<RexNode>>> {
* <p>Every element from outer list contributes a partition, so after
pruning all partitions defined by these
* expressions must be preserved in the plan.
*
+ * @param extractor Metadata extractor to use.
* @param tableModify A modify node to start traversal from.
* @return A list of expressions. It's guaranteed that returned collection
represents complete set of partitions. Returns {@code null}
* otherwise.
*/
- static @Nullable List<List<RexNode>> go(IgniteTableModify tableModify) {
- var modifyNodeShuttle = new ModifyNodeVisitor();
-
- return modifyNodeShuttle.visit((IgniteRel) tableModify.getInput());
+ static @Nullable List<List<@Nullable RexNode>> go(
+ PartitionPruningMetadataExtractor extractor,
+ IgniteTableModify tableModify
+ ) {
+ switch (tableModify.getOperation()) {
+ case INSERT:
+ case UPDATE:
+ case DELETE:
+ ModifyNodeVisitor visitor = new ModifyNodeVisitor(extractor);
+ return visitor.visit((IgniteRel) tableModify.getInput());
+ default:
+ return null;
+ }
}
@Override
@@ -212,14 +237,22 @@ class ModifyNodeVisitor implements
IgniteRelVisitor<List<List<RexNode>>> {
@Override
public @Nullable List<List<RexNode>> visit(IgniteIndexScan rel) {
- // processing of index scan is not supported at the moment
- return null;
+ extractor.visit(rel);
+
+ IgniteTable table = rel.getTable().unwrap(IgniteTable.class);
+ assert table != null : "No table";
+
+ return extractValuesFromScan(rel.sourceId(), table,
rel.requiredColumns(), rel.projects());
}
@Override
public @Nullable List<List<RexNode>> visit(IgniteTableScan rel) {
- // processing of table scan is not supported at the moment
- return null;
+ extractor.visit(rel);
+
+ IgniteTable table = rel.getTable().unwrap(IgniteTable.class);
+ assert table != null : "No table";
+
+ return extractValuesFromScan(rel.sourceId(), table,
rel.requiredColumns(), rel.projects());
}
@Override
@@ -332,4 +365,104 @@ class ModifyNodeVisitor implements
IgniteRelVisitor<List<List<RexNode>>> {
return null;
}
+
+ private List<List<RexNode>> extractValuesFromScan(
+ long sourceId,
+ IgniteTable table,
+ @Nullable ImmutableIntList requiredColumns,
+ @Nullable List<RexNode> projects
+ ) {
+ PartitionPruningColumns metadata = extractor.result.get(sourceId);
+ List<List<RexNode>> values = metadataToValues(table, metadata);
+
+ // Apply projection formed by requiredColumns if any.
+ if (requiredColumns != null) {
+ List<List<RexNode>> projectedValues = new
ArrayList<>(values.size());
+
+ for (List<RexNode> row : values) {
+ List<RexNode> projectedRow = new
ArrayList<>(requiredColumns.size());
+
+ for (int i = 0; i < requiredColumns.size(); i++) {
+ projectedRow.add(row.get(requiredColumns.getInt(i)));
+ }
+
+ projectedValues.add(projectedRow);
+ }
+
+ values = projectedValues;
+ }
+
+ // Apply projection attached to a scan if any.
+ if (!nullOrEmpty(projects)) {
+ List<List<RexNode>> projectedValues = new
ArrayList<>(values.size());
+
+ for (List<RexNode> inputRow : values) {
+ RexVisitor<RexNode> inputInliner = new RexShuttle() {
+ @Override
+ public RexNode visitLocalRef(RexLocalRef localRef) {
+ return inputRow.get(localRef.getIndex());
+ }
+ };
+ projectedValues.add(inputInliner.visitList(projects));
+ }
+
+ values = projectedValues;
+ }
+
+ return values;
+ }
+
+ private static List<List<RexNode>> metadataToValues(
+ IgniteTable table,
+ PartitionPruningColumns metadata
+ ) {
+ int numColumns = table.descriptor().columnsCount();
+
+ if (metadata == null) {
+ List<RexNode> row = createRowWithNoValues(numColumns);
+ return List.of(row);
+ }
+
+ // Creates a row for each PP metadata entry. Consider the following
example
+ // of a scan predicate that includes colocation columns C1, C2, C3:
+ //
+ // c1 IN (10, ?0, 42) AND c2 = ?1 AND c3 = 99
+ //
+ // Its metadata is:
+ // [c1=10, c2=?1, c3=99]
+ // [c1=?0, c2=?1, c3=99]
+ // [c1=42, c2=?1, c3=99]
+ //
+ // That metadata is equivalent to 3 rows:
+ // [10, ?1, 99]
+ // [?0, ?1, 99]
+ // [42, ?1, 99]
+ //
+
+ List<List<RexNode>> projectionAsValues = new
ArrayList<>(metadata.columns().size());
+
+ for (Int2ObjectMap<RexNode> columns : metadata.columns()) {
+ List<RexNode> row = createRowWithNoValues(numColumns);
+
+ // Then set colocation key columns.
+ for (Int2ObjectMap.Entry<RexNode> entry :
columns.int2ObjectEntrySet()) {
+ int col = entry.getIntKey();
+ row.set(col, entry.getValue());
+ }
+
+ projectionAsValues.add(row);
+ }
+
+ return projectionAsValues;
+ }
+
+ private static List<RexNode> createRowWithNoValues(int numColumns) {
+ List<RexNode> row = new ArrayList<>(numColumns);
+
+ for (int i = 0; i < numColumns; i++) {
+ row.add(VALUE_NOT_ASSIGNED);
+ }
+
+ return row;
+ }
}
diff --git
a/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPrunerImpl.java
b/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPrunerImpl.java
index ed3b3cb9c6e..df8102b1d85 100644
---
a/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPrunerImpl.java
+++
b/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPrunerImpl.java
@@ -61,7 +61,7 @@ public class PartitionPrunerImpl implements PartitionPruner {
continue;
}
- PartitionPruningMetadata fragmentPruningMetadata =
metadata.subset(fragment.tables());
+ PartitionPruningMetadata fragmentPruningMetadata =
metadata.subset(fragment.tables().keySet());
if (fragmentPruningMetadata.data().isEmpty()) {
updatedFragments.add(mappedFragment);
diff --git
a/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPruningColumns.java
b/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPruningColumns.java
index 8f795316ca4..259c0c6af23 100644
---
a/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPruningColumns.java
+++
b/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPruningColumns.java
@@ -26,6 +26,7 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
+import java.util.Objects;
import java.util.stream.Collectors;
import org.apache.calcite.rex.RexNode;
import org.apache.ignite.internal.sql.engine.externalize.RelJsonReader;
@@ -91,6 +92,22 @@ public class PartitionPruningColumns implements Serializable
{
.collect(Collectors.toList());
}
+ /** {@inheritDoc} */
+ @Override
+ public boolean equals(Object o) {
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ PartitionPruningColumns columns1 = (PartitionPruningColumns) o;
+ return Objects.equals(columns, columns1.columns);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int hashCode() {
+ return Objects.hashCode(columns);
+ }
+
/**
* Serialized form to serialize rex nodes.
*/
diff --git
a/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPruningMetadata.java
b/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPruningMetadata.java
index f8894a3b660..5484036fba7 100644
---
a/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPruningMetadata.java
+++
b/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPruningMetadata.java
@@ -20,8 +20,8 @@ package org.apache.ignite.internal.sql.engine.prepare.pruning;
import it.unimi.dsi.fastutil.longs.Long2ObjectMap;
import it.unimi.dsi.fastutil.longs.Long2ObjectMaps;
import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
+import it.unimi.dsi.fastutil.longs.LongSet;
import java.io.Serializable;
-import org.apache.ignite.internal.sql.engine.schema.IgniteTable;
import org.apache.ignite.internal.tostring.S;
import org.jetbrains.annotations.Nullable;
@@ -67,16 +67,16 @@ public class PartitionPruningMetadata implements
Serializable {
}
/**
- * Returns a subset of this metadata that uses the given tables.
+ * Returns a subset of this metadata that uses the given sources.
*
- * @param tables Tables.
+ * @param sources Sources.
* @return Metadata.
*/
- public PartitionPruningMetadata subset(Long2ObjectMap<IgniteTable> tables)
{
+ public PartitionPruningMetadata subset(LongSet sources) {
Long2ObjectMap<PartitionPruningColumns> out = new
Long2ObjectOpenHashMap<>();
for (Long2ObjectMap.Entry<PartitionPruningColumns> e :
data.long2ObjectEntrySet()) {
- if (tables.containsKey(e.getLongKey())) {
+ if (sources.contains(e.getLongKey())) {
out.put(e.getLongKey(), e.getValue());
}
}
diff --git
a/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPruningMetadataExtractor.java
b/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPruningMetadataExtractor.java
index fcc5d066155..d6372817252 100644
---
a/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPruningMetadataExtractor.java
+++
b/modules/sql-engine/src/main/java/org/apache/ignite/internal/sql/engine/prepare/pruning/PartitionPruningMetadataExtractor.java
@@ -17,7 +17,6 @@
package org.apache.ignite.internal.sql.engine.prepare.pruning;
-import static org.apache.calcite.rel.core.TableModify.Operation.INSERT;
import static org.apache.ignite.internal.util.CollectionUtils.nullOrEmpty;
import it.unimi.dsi.fastutil.ints.Int2ObjectArrayMap;
@@ -30,6 +29,7 @@ import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
+import org.apache.calcite.rel.core.TableModify.Operation;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexCall;
@@ -44,6 +44,8 @@ import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.calcite.util.ImmutableIntList;
+import org.apache.calcite.util.mapping.Mapping;
+import org.apache.calcite.util.mapping.Mappings;
import org.apache.ignite.internal.sql.engine.prepare.IgniteRelShuttle;
import org.apache.ignite.internal.sql.engine.rel.IgniteIndexScan;
import org.apache.ignite.internal.sql.engine.rel.IgniteRel;
@@ -89,7 +91,7 @@ import org.jetbrains.annotations.VisibleForTesting;
*/
public class PartitionPruningMetadataExtractor extends IgniteRelShuttle {
- private final Long2ObjectMap<PartitionPruningColumns> result = new
Long2ObjectOpenHashMap<>();
+ final Long2ObjectMap<PartitionPruningColumns> result = new
Long2ObjectOpenHashMap<>();
/**
* Extracts partition pruning metadata from the given physical plan.
@@ -144,23 +146,35 @@ public class PartitionPruningMetadataExtractor extends
IgniteRelShuttle {
/** {@inheritDoc} */
@Override
public IgniteRel visit(IgniteTableModify rel) {
- if (rel.getOperation() != INSERT) {
- return super.visit(rel);
- }
-
IgniteTable table = rel.getTable().unwrap(IgniteTable.class);
assert table != null;
RexBuilder rexBuilder = rel.getCluster().getRexBuilder();
- List<List<RexNode>> results = ModifyNodeVisitor.go(rel);
+ List<List<RexNode>> results = ModifyNodeVisitor.go(this, rel);
if (results == null) {
return rel;
}
- extractFromValues(rel.sourceId(), table, results, rexBuilder);
+ IntList keysList = distributionKeys(table);
+
+ if (keysList.isEmpty()) {
+ return super.visit(rel);
+ }
+
+ IntList key2Position;
+ if (rel.getOperation() == Operation.DELETE) {
+ ImmutableIntList keyColumns = table.keyColumns();
+ Mapping mapping =
Commons.projectedMapping(table.descriptor().columnsCount(), keyColumns);
+ List<Integer> mappedKeys = Mappings.apply2(mapping, new
ArrayList<>(keysList));
+ key2Position =
IntArrayList.toList(mappedKeys.stream().mapToInt(Integer::intValue));
+ } else {
+ key2Position = keysList;
+ }
+
+ extractFromValues(rel.sourceId(), table, results, rexBuilder,
keysList, key2Position);
return super.visit(rel);
}
@@ -169,28 +183,25 @@ public class PartitionPruningMetadataExtractor extends
IgniteRelShuttle {
long sourceId,
IgniteTable table,
List<List<RexNode>> expressions,
- RexBuilder rexBuilder
+ RexBuilder rexBuilder,
+ IntList keysList,
+ IntList key2PositionList
) {
- IntList keysList = distributionKeys(table);
-
- if (keysList.isEmpty()) {
- return;
- }
-
List<RexNode> andEqNodes = new ArrayList<>();
RelDataType rowTypes = table.getRowType(Commons.typeFactory());
for (List<RexNode> items : expressions) {
List<RexNode> andNodes = new ArrayList<>(keysList.size());
- for (int key : keysList) {
- RexLocalRef ref;
- ref =
rexBuilder.makeLocalRef(rowTypes.getFieldList().get(key).getType(), key);
- RexNode lit = items.get(key);
- if (!isValueExpr(lit)) {
+ for (int i = 0; i < keysList.size(); i++) {
+ int keyPos = key2PositionList.getInt(i);
+ RexNode expr = items.get(keyPos);
+ if (expr == ModifyNodeVisitor.VALUE_NOT_ASSIGNED ||
!isValueExpr(expr)) {
return;
}
- RexNode eq = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
ref, lit);
+ int keyIndex = keysList.getInt(i);
+ RexLocalRef ref =
rexBuilder.makeLocalRef(rowTypes.getFieldList().get(keyIndex).getType(),
keyIndex);
+ RexNode eq = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
ref, expr);
andNodes.add(eq);
}
if (andNodes.size() > 1) {
diff --git
a/modules/sql-engine/src/test/java/org/apache/ignite/internal/sql/engine/planner/PartitionPruningMetadataTest.java
b/modules/sql-engine/src/test/java/org/apache/ignite/internal/sql/engine/planner/PartitionPruningMetadataTest.java
index a07e006aab9..6ecec6c409a 100644
---
a/modules/sql-engine/src/test/java/org/apache/ignite/internal/sql/engine/planner/PartitionPruningMetadataTest.java
+++
b/modules/sql-engine/src/test/java/org/apache/ignite/internal/sql/engine/planner/PartitionPruningMetadataTest.java
@@ -67,6 +67,13 @@ public class PartitionPruningMetadataTest extends
AbstractPlannerTest {
.distribution(TestBuilders.affinity(List.of(0), 1, 2))
.build());
+ private static final IgniteSchema TABLE_C1_NULLABLE_C2_NULLABLE_C3 =
createSchema(TestBuilders.table().name("T")
+ .addKeyColumn("C1", NativeTypes.INT32)
+ .addColumn("C2", NativeTypes.INT32, true)
+ .addColumn("C3", NativeTypes.INT32, true)
+ .distribution(TestBuilders.affinity(List.of(0), 1, 2))
+ .build());
+
private static final IgniteSchema TABLE_C1_C2 =
createSchema(TestBuilders.table().name("T")
.addKeyColumn("C1", NativeTypes.INT32)
.addKeyColumn("C2", NativeTypes.INT32)
@@ -210,7 +217,9 @@ public class PartitionPruningMetadataTest extends
AbstractPlannerTest {
// expressions are not supported at the moment
CASE_16("t(c1) VALUES (?), (? * 10)", TABLE_C1_NULLABLE_C2),
- CASE_17("t(c1) VALUES (?), (OCTET_LENGTH('TEST'))",
TABLE_C1_NULLABLE_C2)
+ CASE_17("t(c1) VALUES (?), (OCTET_LENGTH('TEST'))",
TABLE_C1_NULLABLE_C2),
+
+ CASE_18("t(c1, c2) SELECT c1, c3 FROM t WHERE c1 = 5",
TABLE_C1_NULLABLE_C2_NULLABLE_C3, "[c1=5]")
;
private final TestCase data;
diff --git
a/modules/sql-engine/src/test/java/org/apache/ignite/internal/sql/engine/planner/PartitionPruningTest.java
b/modules/sql-engine/src/test/java/org/apache/ignite/internal/sql/engine/planner/PartitionPruningTest.java
index 21ce869b421..1716b734bed 100644
---
a/modules/sql-engine/src/test/java/org/apache/ignite/internal/sql/engine/planner/PartitionPruningTest.java
+++
b/modules/sql-engine/src/test/java/org/apache/ignite/internal/sql/engine/planner/PartitionPruningTest.java
@@ -18,10 +18,15 @@
package org.apache.ignite.internal.sql.engine.planner;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.stream.Collectors;
import org.apache.calcite.rex.RexLiteral;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.ignite.internal.sql.engine.framework.TestBuilders;
@@ -29,13 +34,13 @@ import
org.apache.ignite.internal.sql.engine.prepare.IgniteRelShuttle;
import
org.apache.ignite.internal.sql.engine.prepare.pruning.PartitionPruningColumns;
import
org.apache.ignite.internal.sql.engine.prepare.pruning.PartitionPruningMetadata;
import
org.apache.ignite.internal.sql.engine.prepare.pruning.PartitionPruningMetadataExtractor;
+import org.apache.ignite.internal.sql.engine.rel.IgniteKeyValueModify;
import org.apache.ignite.internal.sql.engine.rel.IgniteRel;
import org.apache.ignite.internal.sql.engine.rel.SourceAwareIgniteRel;
import org.apache.ignite.internal.sql.engine.schema.IgniteIndex.Collation;
import org.apache.ignite.internal.sql.engine.schema.IgniteSchema;
import org.apache.ignite.internal.sql.engine.schema.IgniteTable;
import org.apache.ignite.internal.type.NativeTypes;
-import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
/**
@@ -93,14 +98,42 @@ public class PartitionPruningTest extends
AbstractPlannerTest {
.end()
.build();
- PartitionPruningMetadata actual = extractMetadata(
- "SELECT * FROM t WHERE c1 = 42",
- table
- );
+ // SELECT
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "SELECT * FROM t WHERE c1 = 42",
+ table
+ );
- PartitionPruningColumns cols = actual.get(1);
- assertNotNull(cols, "No metadata for source=1");
- assertEquals("[[0=42]]",
PartitionPruningColumns.canonicalForm(cols).toString());
+ expectMetadata(Map.of(1L, "[[0=42]]"), actual);
+ }
+
+ // INSERT
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t SELECT c1, c2 FROM t WHERE c1 = 42",
+ table
+ );
+ expectMetadata(Map.of(1L, "[[0=42]]", 2L, "[[0=42]]"), actual);
+ }
+
+ // UPDATE
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "UPDATE t SET c2=99 WHERE c1 = 42",
+ table
+ );
+ expectMetadata(Map.of(1L, "[[0=42]]", 2L, "[[0=42]]"), actual);
+ }
+
+ // DELETE
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "DELETE FROM t WHERE c1 = 42",
+ table
+ );
+ expectMetadata(Map.of(1L, "[[0=42]]", 2L, "[[0=42]]"), actual);
+ }
}
@Test
@@ -211,8 +244,188 @@ public class PartitionPruningTest extends
AbstractPlannerTest {
}
@Test
- @Disabled("https://issues.apache.org/jira/browse/IGNITE-26203")
public void testInsertFromSelect() throws Exception {
+ IgniteTable table1 = TestBuilders.table()
+ .name("T1")
+ .addKeyColumn("C1", NativeTypes.INT32)
+ .addKeyColumn("C2", NativeTypes.INT32)
+ .addColumn("C3", NativeTypes.INT32, true)
+ .distribution(TestBuilders.affinity(List.of(1, 0), 1, 2))
+ .build();
+
+ // Same table: same metadata
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT c1, c2, c3 FROM t1 WHERE c2 = 42
and c1 = ?",
+ table1
+ );
+ expectMetadata(Map.of(
+ 1L, "[[0=?0, 1=42]]",
+ 2L, "[[0=?0, 1=42]]"),
+ actual
+ );
+ }
+
+ // Same table
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT c1, c2, c3 FROM t1 WHERE c2 IN (42,
99) and c1 = ?",
+ table1
+ );
+ expectMetadata(Map.of(
+ 1L, "[[0=?0, 1=42], [0=?0, 1=99]]",
+ 2L, "[[0=?0, 1=42], [0=?0, 1=99]]"),
+ actual
+ );
+ }
+
+ // Same table: multiple values in both columns
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT c1, c2, c3 FROM t1 WHERE c2 IN (42,
99) and c1 IN (?, 10, 56)",
+ table1
+ );
+ expectMetadata(Map.of(
+ 1L, "[[0=?0, 1=42], [0=?0, 1=99], [0=10, 1=42],
[0=10, 1=99], [0=56, 1=42], [0=56, 1=99]]",
+ 2L, "[[0=?0, 1=42], [0=?0, 1=99], [0=10, 1=42],
[0=10, 1=99], [0=56, 1=42], [0=56, 1=99]]"),
+ actual
+ );
+ }
+
+ // Star expansion
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT * FROM t1 WHERE c2=42 and c1=?",
+ table1
+ );
+ expectMetadata(Map.of(1L, "[[0=?0, 1=42]]", 2L, "[[0=?0, 1=42]]"),
actual);
+ }
+
+ // Projection with constants but insert order still matches.
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT c1, c2, 3 FROM t1 WHERE c2=42 and
c3=99 and c1=?",
+ table1
+ );
+ expectMetadata(Map.of(1L, "[[0=?0, 1=42]]", 2L, "[[0=?0, 1=42]]"),
actual);
+ }
+
+ // Permuting projection does not allow propagation
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT c3, c2, c1 FROM t1 WHERE c2=42 and
c1=?",
+ table1
+ );
+ expectMetadata(Map.of(2L, "[[0=?0, 1=42]]"), actual);
+ }
+
+ // Constant replaces C1; C2 comes from scan (42). Both colocation keys
are known at the INSERT site.
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT 10, c2, c1 FROM t1 WHERE c2=42 and
c3=99 and c1=?",
+ table1
+ );
+ expectMetadata(Map.of(1L, "[[0=10, 1=42]]", 2L, "[[0=?0, 1=42]]"),
actual);
+ }
+
+ // Constant replaces C1; C2 and C3 are taken from the scan. C1=10 is a
known constant.
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT 10, c2, c3 FROM t1 WHERE c2=42 and
c1=?",
+ table1
+ );
+ expectMetadata(Map.of(1L, "[[0=10, 1=42]]", 2L, "[[0=?0, 1=42]]"),
actual);
+ }
+ }
+
+ @Test
+ public void testInsertFromSelectNonSequentialKeyColumns() throws Exception
{
+ IgniteTable table1 = TestBuilders.table()
+ .name("T1")
+ .addColumn("C1", NativeTypes.INT32)
+ .addKeyColumn("C2", NativeTypes.INT32)
+ .addColumn("C3", NativeTypes.INT32)
+ .addKeyColumn("C4", NativeTypes.INT32)
+ .addColumn("C5", NativeTypes.INT32)
+ .distribution(TestBuilders.affinity(List.of(3, 1), 1, 2))
+ .build();
+
+ // Insertion order matches
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT 10, c2, c3, c4, 100 FROM t1 WHERE
c5=999 and c2=42 and c3=? and c1=99 and c4=78",
+ table1
+ );
+ expectMetadata(Map.of(1L, "[[1=42, 3=78]]", 2L, "[[1=42, 3=78]]"),
actual);
+ }
+
+ // Insertion order is incorrect
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT 10, c3, c1, c4, 100 FROM t1 WHERE
c5=999 and c2=42 and c3=? and c1=99 and c4=78",
+ table1
+ );
+ expectMetadata(Map.of(2L, "[[1=42, 3=78]]"), actual);
+ }
+
+ // C2 and C4 (both colocation keys) come through the projection
correctly; constants 10 and 78 fill other positions.
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT 10, c2, c3, 78, 100 FROM t1 WHERE
c5=999 and c2=42 and c3=? and c1=99 and c4=78",
+ table1
+ );
+ expectMetadata(Map.of(1L, "[[1=42, 3=78]]", 2L, "[[1=42, 3=78]]"),
actual);
+ }
+ }
+
+ @Test
+ public void testInsertFromSelectDifferentTables() throws Exception {
+ IgniteTable table1 = TestBuilders.table()
+ .name("T1")
+ .addKeyColumn("C1", NativeTypes.INT32)
+ .addKeyColumn("C2", NativeTypes.INT32)
+ .addColumn("C3", NativeTypes.INT32, true)
+ .distribution(TestBuilders.affinity(List.of(1, 0), 1, 2))
+ .build();
+
+ IgniteTable table2 = TestBuilders.table()
+ .name("T2")
+ .addKeyColumn("C1", NativeTypes.INT32)
+ .addKeyColumn("C2", NativeTypes.INT32)
+ .addColumn("C3", NativeTypes.INT32, true)
+ .distribution(TestBuilders.affinity(List.of(0, 1), 1, 2))
+ .build();
+
+ // Different tables: identity projection — colocation keys propagate
through to the INSERT target.
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT c1, c2, c3 FROM t2 WHERE c2=42 and
c1=?",
+ table1, table2
+ );
+ expectMetadata(Map.of(1L, "[[0=?0, 1=42]]", 2L, "[[0=?0, 1=42]]"),
actual);
+ }
+
+ // Different tables: constant replaces C3, C1 and C2 still propagate.
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT c1, c2, 100 FROM t2 WHERE c2=42 and
c1=?",
+ table1, table2
+ );
+ expectMetadata(Map.of(1L, "[[0=?0, 1=42]]", 2L, "[[0=?0, 1=42]]"),
actual);
+ }
+
+ // Different tables: constant 99 replaces C1 — INSERT target gets a
fully determined partition.
+ {
+ PartitionPruningMetadata actual = extractMetadata(
+ "INSERT INTO t1 SELECT 99, c2, c3 FROM t2 WHERE c2=42 and
c1=?",
+ table1, table2
+ );
+ expectMetadata(Map.of(1L, "[[0=99, 1=42]]", 2L, "[[0=?0, 1=42]]"),
actual);
+ }
+ }
+
+ @Test
+ public void testDelete() throws Exception {
IgniteTable table = TestBuilders.table()
.name("T")
.addKeyColumn("C1", NativeTypes.INT32)
@@ -221,14 +434,188 @@ public class PartitionPruningTest extends
AbstractPlannerTest {
.distribution(TestBuilders.affinity(List.of(1, 0), 1, 2))
.build();
- PartitionPruningMetadata actual = extractMetadata(
- "INSERT INTO t SELECT * FROM t WHERE c2=1 and c1=2",
- table
- );
+ // Simple
+ {
+ String query = "DELETE FROM t WHERE c1 = 42 and c2=?";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ expectMetadata(Map.of(1L, "[[0=42, 1=?0]]", 2L, "[[0=42, 1=?0]]"),
actual);
+ }
- PartitionPruningColumns cols = actual.get(1);
- assertNotNull(cols, "No metadata for source=1");
- assertEquals("[[0=2, 1=1]]",
PartitionPruningColumns.canonicalForm(cols).toString());
+ // Multiple values
+ {
+ String query = "DELETE FROM t WHERE c2 IN (42, ?, 99) and c1=?";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ expectMetadata(Map.of(
+ 1L, "[[0=?1, 1=42], [0=?1, 1=99], [0=?1, 1=?0]]",
+ 2L, "[[0=?1, 1=42], [0=?1, 1=99], [0=?1, 1=?0]]"
+ ), actual);
+ }
+
+ {
+ String query = "DELETE FROM t WHERE c2 IN (SELECT c3 FROM t WHERE
c2=? AND c1 IN (42, 99))";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ expectMetadata(Map.of(), actual);
+ }
+
+ // Simple selects are not constant-folded
+ {
+ String query = "DELETE FROM t WHERE c2 = (SELECT 42) and c1 =
(SELECT 99)";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ expectMetadata(Map.of(), actual);
+ }
+ }
+
+ @Test
+ public void testDeleteShortKey() throws Exception {
+ IgniteTable table = TestBuilders.table()
+ .name("T")
+ .addColumn("C1", NativeTypes.INT32)
+ .addKeyColumn("C2", NativeTypes.INT32)
+ .addColumn("C3", NativeTypes.INT32, true)
+ .distribution(TestBuilders.affinity(List.of(1), 1, 2))
+ .build();
+
+ {
+ String query = "DELETE FROM t WHERE c1 = 42 and c2=? and c3=99";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ expectMetadata(Map.of(1L, "[[1=?0]]", 2L, "[[1=?0]]"), actual);
+ }
+
+ // swap columns
+ {
+ String query = "DELETE FROM t WHERE c2=? and c3=99 and c1 = 42";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ expectMetadata(Map.of(1L, "[[1=?0]]", 2L, "[[1=?0]]"), actual);
+ }
+
+ // Colocation key is not specified.
+ {
+ String query = "DELETE FROM t WHERE c1 = 42";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ expectMetadata(Map.of(), actual);
+ }
+ }
+
+ @Test
+ public void testDeleteKeyWithGaps() throws Exception {
+ IgniteTable table = TestBuilders.table()
+ .name("T")
+ .addColumn("C1", NativeTypes.INT32)
+ .addKeyColumn("C2", NativeTypes.INT32)
+ .addColumn("C3", NativeTypes.INT32, true)
+ .addKeyColumn("C4", NativeTypes.INT32)
+ .addColumn("C5", NativeTypes.INT32, true)
+ .distribution(TestBuilders.affinity(List.of(3, 1), 1, 2))
+ .build();
+
+ {
+ String query = "DELETE FROM t WHERE c1 = ? and c2=? and c4=111 and
c3=?";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ expectMetadata(Map.of(1L, "[[1=?1, 3=111]]", 2L, "[[1=?1,
3=111]]"), actual);
+ }
+
+ // swap columns
+ {
+ String query = "DELETE FROM t WHERE c4=111 and c2=? and c3=99 and
c1 = 42";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ expectMetadata(Map.of(1L, "[[1=?0, 3=111]]", 2L, "[[1=?0,
3=111]]"), actual);
+ }
+ }
+
+ @Test
+ public void testUpdate() throws Exception {
+ IgniteTable table = TestBuilders.table()
+ .name("T")
+ .addKeyColumn("C1", NativeTypes.INT32)
+ .addKeyColumn("C2", NativeTypes.INT32)
+ .addColumn("C3", NativeTypes.INT32, true)
+ .distribution(TestBuilders.affinity(List.of(1, 0), 1, 2))
+ .build();
+
+ // Simple
+ {
+ String query = "UPDATE t SET c3 = 100 WHERE c1 = 42 and c2=?";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ expectMetadata(Map.of(1L, "[[0=42, 1=?0]]", 2L, "[[0=42, 1=?0]]"),
actual);
+ }
+
+ // Multiple values
+ {
+ String query = "UPDATE t SET c3 = 100 WHERE c2 IN (42, 99) and c1
IN (?, 10, 101)";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ expectMetadata(Map.of(
+ 1L, "[[0=?0, 1=42], [0=?0, 1=99], [0=10, 1=42], [0=10,
1=99], [0=101, 1=42], [0=101, 1=99]]",
+ 2L, "[[0=?0, 1=42], [0=?0, 1=99], [0=10, 1=42], [0=10,
1=99], [0=101, 1=42], [0=101, 1=99]]"
+ ), actual);
+ }
+
+ // Simple selects are not constant-folded
+ {
+ String query = "UPDATE t SET c3 = 100 WHERE c2 = (SELECT 42) and
c1 = (SELECT 99)";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ expectMetadata(Map.of(), actual);
+ }
+ }
+
+ @Test
+ public void insertToSelectKvInsert() throws Exception {
+ IgniteTable table = TestBuilders.table()
+ .name("T")
+ .addKeyColumn("C1", NativeTypes.INT32)
+ .addKeyColumn("C2", NativeTypes.INT32)
+ .addColumn("C3", NativeTypes.INT32, true)
+ .distribution(TestBuilders.affinity(List.of(1, 0), 1, 2))
+ .build();
+
+ {
+ String query = "INSERT INTO t SELECT 42 as c1, 99 as c2, 0 as c3";
+ PartitionPruningMetadata actual = extractMetadata(
+ query,
+ table
+ );
+ assertTrue(actual.data().isEmpty(), "Columns: " + actual);
+
+ // Key value plan
+ IgniteSchema schema = createSchema(table);
+ IgniteRel rel = physicalPlan(query, schema);
+ assertInstanceOf(IgniteKeyValueModify.class, rel);
+ }
}
@Test
@@ -244,13 +631,13 @@ public class PartitionPruningTest extends
AbstractPlannerTest {
.name("T2")
.addKeyColumn("C1", NativeTypes.INT32)
.addColumn("C2", NativeTypes.INT32)
- .distribution(TestBuilders.affinity(List.of(0), 1, 2))
+ .distribution(TestBuilders.affinity(List.of(0), 2, 2))
.build();
PartitionPruningMetadataExtractor extractor = new
PartitionPruningMetadataExtractor();
PartitionPruningMetadata actual = extractMetadata(extractor,
- "SELECT /*+ disable_decorrelation */ * FROM t1 as cor WHERE"
+ "SELECT /*+ disable_decorrelation */ * FROM t1 as cor WHERE"
+ " EXISTS (SELECT 1 FROM t2 WHERE t2.c1 = cor.c1 OR
t2.c1=42)", table1, table2);
PartitionPruningColumns cols = actual.get(2);
@@ -289,4 +676,13 @@ public class PartitionPruningTest extends
AbstractPlannerTest {
}
}
}
+
+ private static void expectMetadata(Map<Long, String> expected,
PartitionPruningMetadata actualMetadata) {
+ Map<Long, String> actualMetadataAsStr =
actualMetadata.data().long2ObjectEntrySet()
+ .stream()
+ .map(e -> Map.entry(e.getLongKey(),
PartitionPruningColumns.canonicalForm(e.getValue()).toString()))
+ .collect(Collectors.toMap(Map.Entry::getKey,
Map.Entry::getValue));
+
+ assertEquals(new TreeMap<>(expected), new
TreeMap<>(actualMetadataAsStr), "Partition pruning metadata");
+ }
}
diff --git
a/modules/sql-engine/src/test/java/org/apache/ignite/internal/sql/engine/prepare/partitionawareness/PartitionAwarenessMetadataTest.java
b/modules/sql-engine/src/test/java/org/apache/ignite/internal/sql/engine/prepare/partitionawareness/PartitionAwarenessMetadataTest.java
index 4f387b1fe4b..e84565b1f77 100644
---
a/modules/sql-engine/src/test/java/org/apache/ignite/internal/sql/engine/prepare/partitionawareness/PartitionAwarenessMetadataTest.java
+++
b/modules/sql-engine/src/test/java/org/apache/ignite/internal/sql/engine/prepare/partitionawareness/PartitionAwarenessMetadataTest.java
@@ -159,21 +159,23 @@ public class PartitionAwarenessMetadataTest extends
BaseIgniteAbstractTest {
// UPDATE
Arguments.of("UPDATE t SET c2=1 WHERE c1=?", meta(new
int[]{0}, new int[0], DirectTxMode.NOT_SUPPORTED)),
Arguments.of("UPDATE t SET c2=? WHERE c1=1", meta(new
int[]{-1}, new int[]{1}, DirectTxMode.NOT_SUPPORTED)),
- Arguments.of("UPDATE t SET c2=? WHERE c1=1 AND c3 IN (SELECT *
FROM SYSTEM_RANGE(1, 100))",
- meta(new int[]{-1}, new int[]{1},
DirectTxMode.NOT_SUPPORTED)),
+ // TODO https://issues.apache.org/jira/browse/IGNITE-28199
Requires single bottom-up traversal
+ // Arguments.of("UPDATE t SET c2=? WHERE c1=1 AND c3 IN
(SELECT * FROM SYSTEM_RANGE(1, 100))",
+ // meta(new int[]{-1}, new int[]{1},
DirectTxMode.NOT_SUPPORTED)),
// Can refer multiple partitions - skip
Arguments.of("UPDATE t SET c2=1 WHERE c1=1 or c1=2", null),
Arguments.of("UPDATE t SET c2=? WHERE c1 IN (?, 2)", null),
// INSERT
- // TODO https://issues.apache.org/jira/browse/IGNITE-26203 No
partition pruning metadata for INSERT INTO ... SELECT
+ // TODO https://issues.apache.org/jira/browse/IGNITE-28201
Sql. Partition Pruning. Arbitrary projections
// Arguments.of("INSERT INTO t SELECT 1 as c1, 2 as c2, x as
c3 FROM SYSTEM_RANGE(1, 100)",
// meta(new int[]{-1}, new int[]{1}),
DirectTxMode.NOT_SUPPORTED),
// DELETE
- Arguments.of("DELETE FROM t WHERE c1=1 AND c2 IN (SELECT *
FROM SYSTEM_RANGE(1, 100)) ",
- meta(new int[]{-1}, new int[]{1},
DirectTxMode.NOT_SUPPORTED)),
+ // TODO https://issues.apache.org/jira/browse/IGNITE-28199
Requires single bottom-up traversal
+ // Arguments.of("DELETE FROM t WHERE c1=1 AND c2 IN (SELECT *
FROM SYSTEM_RANGE(1, 100)) ",
+ // meta(new int[]{-1}, new int[]{1},
DirectTxMode.NOT_SUPPORTED)),
// Can refer multiple partitions - skip
Arguments.of("DELETE FROM t WHERE c1=1 or c1=2", null)
);
@@ -251,20 +253,22 @@ public class PartitionAwarenessMetadataTest extends
BaseIgniteAbstractTest {
Arguments.of("UPDATE t SET c1=? WHERE c3=1 and c2=?",
meta(new int[]{-1}, new int[]{1},
DirectTxMode.NOT_SUPPORTED)
),
- Arguments.of("UPDATE t SET c1=? WHERE c3=1 and c2 IN (SELECT *
FROM SYSTEM_RANGE(1, 100))",
- meta(new int[]{-1}, new int[]{1},
DirectTxMode.NOT_SUPPORTED)
- ),
+ // TODO https://issues.apache.org/jira/browse/IGNITE-28199
Requires single bottom-up traversal
+ // Arguments.of("UPDATE t SET c1=? WHERE c3=1 and c2 IN
(SELECT * FROM SYSTEM_RANGE(1, 100))",
+ // meta(new int[]{-1}, new int[]{1},
DirectTxMode.NOT_SUPPORTED)
+ // ),
// INSERT
- // TODO https://issues.apache.org/jira/browse/IGNITE-26203 No
partition pruning metadata for INSERT INTO ... SELECT
+ // TODO https://issues.apache.org/jira/browse/IGNITE-28201
Sql. Partition Pruning. Arbitrary projections
// Arguments.of("INSERT INTO t SELECT 1 as c1, 2 as c2, 3 as
c3 FROM SYSTEM_RANGE(1, 100)",
// meta(new int[]{-1}, new int[]{3},
DirectTxMode.NOT_SUPPORTED)
// ),
// DELETE
- Arguments.of("DELETE FROM t WHERE c3=3", meta(new int[]{-1},
new int[]{3}, DirectTxMode.NOT_SUPPORTED)),
- Arguments.of("DELETE FROM t WHERE c3=3 and c2 IN (SELECT *
FROM SYSTEM_RANGE(1, 100))",
- meta(new int[]{-1}, new int[]{3},
DirectTxMode.NOT_SUPPORTED))
+ // TODO https://issues.apache.org/jira/browse/IGNITE-28199
Requires single bottom-up traversal
+ // Arguments.of("DELETE FROM t WHERE c3=3 and c2 IN (SELECT *
FROM SYSTEM_RANGE(1, 100))",
+ // meta(new int[]{-1}, new int[]{3},
DirectTxMode.NOT_SUPPORTED))
+ Arguments.of("DELETE FROM t WHERE c3=3", meta(new int[]{-1},
new int[]{3}, DirectTxMode.NOT_SUPPORTED))
);
}
@@ -334,8 +338,9 @@ public class PartitionAwarenessMetadataTest extends
BaseIgniteAbstractTest {
Arguments.of("SELECT * FROM t WHERE c2=? and c3=?", null),
// INSERT
- // TODO https://issues.apache.org/jira/browse/IGNITE-26203 No
partition pruning metadata for INSERT INTO ... SELECT
- // Arguments.of("INSERT INTO t SELECT * FROM t WHERE c1=1 and
c2=2 and c3=3", null)
+ Arguments.of("INSERT INTO t SELECT * FROM t WHERE c1=1 and
c2=2 and c3=3",
+ meta(new int[]{-1, -2, -3}, new int[]{3, 1, 2},
DirectTxMode.NOT_SUPPORTED)
+ ),
// UPDATE
Arguments.of("UPDATE t SET c4=? WHERE c1=? and c2=? and c3=?",
diff --git
a/modules/sql-engine/src/test/resources/mapping/test_partition_pruning.test
b/modules/sql-engine/src/test/resources/mapping/test_partition_pruning.test
index 7fd60a7cf7e..0bafb7ce56e 100644
--- a/modules/sql-engine/src/test/resources/mapping/test_partition_pruning.test
+++ b/modules/sql-engine/src/test/resources/mapping/test_partition_pruning.test
@@ -480,3 +480,215 @@ Fragment#3
fieldNames: [ID, C1]
est: (rows=56847)
---
+# Pruning insert into select same table
+# Partition pruning is applied to both Modify and Scan fragments.
+N1
+INSERT INTO t1_n1n2n3 SELECT * FROM t1_n1n2n3 WHERE id = 42
+---
+Fragment#2 root
+ distribution: single
+ executionNodes: [N1]
+ exchangeSourceNodes: {3=[N3]}
+ colocationGroup[-1]: {nodes=[N1], sourceIds=[-1, 3], assignments={},
partitionsWithConsistencyTokens={N1=[]}}
+ colocationGroup[3]: {nodes=[N1], sourceIds=[-1, 3], assignments={},
partitionsWithConsistencyTokens={N1=[]}}
+ tree:
+ Project
+ fieldNames: [ROWCOUNT]
+ projection: [CAST($f0):BIGINT NOT NULL]
+ est: (rows=1)
+ ColocatedHashAggregate
+ fieldNames: [$f0]
+ group: []
+ aggregation: [$SUM0(ROWCOUNT)]
+ est: (rows=1)
+ Receiver
+ fieldNames: [ROWCOUNT]
+ sourceFragmentId: 3
+ est: (rows=1)
+
+Fragment#3
+ distribution: table PUBLIC.T1_N1N2N3 in zone ZONE_1
+ executionNodes: [N3]
+ targetNodes: [N1]
+ colocationGroup[0]: {nodes=[N3], sourceIds=[0], assignments={part_2=N3:3},
partitionsWithConsistencyTokens={N3=[part_2:3]}}
+ colocationGroup[1]: {nodes=[N3], sourceIds=[1], assignments={part_2=N3:3},
partitionsWithConsistencyTokens={N3=[part_2:3]}}
+ partitions: [T1_N1N2N3=[N3={2}]]
+ tree:
+ Sender
+ distribution: single
+ targetFragmentId: 2
+ est: (rows=1)
+ TableModify
+ table: PUBLIC.T1_N1N2N3
+ fieldNames: [ROWCOUNT]
+ type: INSERT
+ est: (rows=1)
+ TableScan
+ table: PUBLIC.T1_N1N2N3
+ predicate: =(ID, 42)
+ fieldNames: [ID, C1, C2]
+ est: (rows=1)
+---
+# Pruning insert into select same table (insert order changed)
+# Partition pruning is applied only to the Scan fragment because the scan
+projection does not preserve column order.
+N1
+INSERT INTO t1_n1n2n3 SELECT c1, id, c2 FROM t1_n1n2n3 WHERE id = 42
+---
+Fragment#2 root
+ distribution: single
+ executionNodes: [N1]
+ exchangeSourceNodes: {3=[N1, N2, N3]}
+ colocationGroup[-1]: {nodes=[N1], sourceIds=[-1, 3], assignments={},
partitionsWithConsistencyTokens={N1=[]}}
+ colocationGroup[3]: {nodes=[N1], sourceIds=[-1, 3], assignments={},
partitionsWithConsistencyTokens={N1=[]}}
+ tree:
+ Project
+ fieldNames: [ROWCOUNT]
+ projection: [CAST($f0):BIGINT NOT NULL]
+ est: (rows=1)
+ ColocatedHashAggregate
+ fieldNames: [$f0]
+ group: []
+ aggregation: [$SUM0(ROWCOUNT)]
+ est: (rows=1)
+ Receiver
+ fieldNames: [ROWCOUNT]
+ sourceFragmentId: 3
+ est: (rows=1)
+
+Fragment#3
+ distribution: table PUBLIC.T1_N1N2N3 in zone ZONE_1
+ executionNodes: [N1, N2, N3]
+ targetNodes: [N1]
+ exchangeSourceNodes: {4=[N3]}
+ colocationGroup[0]: {nodes=[N1, N2, N3], sourceIds=[0, 4],
assignments={part_0=N1:3, part_1=N2:3, part_2=N3:3},
partitionsWithConsistencyTokens={N1=[part_0:3], N2=[part_1:3], N3=[part_2:3]}}
+ colocationGroup[4]: {nodes=[N1, N2, N3], sourceIds=[0, 4],
assignments={part_0=N1:3, part_1=N2:3, part_2=N3:3},
partitionsWithConsistencyTokens={N1=[part_0:3], N2=[part_1:3], N3=[part_2:3]}}
+ partitions: [T1_N1N2N3=[N1={0}, N2={1}, N3={2}]]
+ tree:
+ Sender
+ distribution: single
+ targetFragmentId: 2
+ est: (rows=1)
+ TableModify
+ table: PUBLIC.T1_N1N2N3
+ fieldNames: [ROWCOUNT]
+ type: INSERT
+ est: (rows=1)
+ Receiver
+ fieldNames: [ID, C1, C2]
+ sourceFragmentId: 4
+ est: (rows=1)
+
+Fragment#4
+ distribution: table PUBLIC.T1_N1N2N3 in zone ZONE_1
+ executionNodes: [N3]
+ targetNodes: [N1, N2, N3]
+ colocationGroup[1]: {nodes=[N3], sourceIds=[1], assignments={part_2=N3:3},
partitionsWithConsistencyTokens={N3=[part_2:3]}}
+ partitions: [T1_N1N2N3=[N3={2}]]
+ tree:
+ Sender
+ distribution: table PUBLIC.T1_N1N2N3 in zone ZONE_1 by [ID]
+ targetFragmentId: 3
+ est: (rows=1)
+ TableScan
+ table: PUBLIC.T1_N1N2N3
+ predicate: =(ID, 42)
+ fieldNames: [ID, C1, C2]
+ est: (rows=1)
+---
+# Pruning in UPDATE is applied to both nodes.
+N1
+UPDATE t1_n1n2n3 SET c1=100 WHERE id = 42
+---
+Fragment#2 root
+ distribution: single
+ executionNodes: [N1]
+ exchangeSourceNodes: {3=[N3]}
+ colocationGroup[-1]: {nodes=[N1], sourceIds=[-1, 3], assignments={},
partitionsWithConsistencyTokens={N1=[]}}
+ colocationGroup[3]: {nodes=[N1], sourceIds=[-1, 3], assignments={},
partitionsWithConsistencyTokens={N1=[]}}
+ tree:
+ Project
+ fieldNames: [ROWCOUNT]
+ projection: [CAST($f0):BIGINT NOT NULL]
+ est: (rows=1)
+ ColocatedHashAggregate
+ fieldNames: [$f0]
+ group: []
+ aggregation: [$SUM0(ROWCOUNT)]
+ est: (rows=1)
+ Receiver
+ fieldNames: [ROWCOUNT]
+ sourceFragmentId: 3
+ est: (rows=1)
+
+Fragment#3
+ distribution: table PUBLIC.T1_N1N2N3 in zone ZONE_1
+ executionNodes: [N3]
+ targetNodes: [N1]
+ colocationGroup[0]: {nodes=[N3], sourceIds=[0], assignments={part_2=N3:3},
partitionsWithConsistencyTokens={N3=[part_2:3]}}
+ colocationGroup[1]: {nodes=[N3], sourceIds=[1], assignments={part_2=N3:3},
partitionsWithConsistencyTokens={N3=[part_2:3]}}
+ partitions: [T1_N1N2N3=[N3={2}]]
+ tree:
+ Sender
+ distribution: single
+ targetFragmentId: 2
+ est: (rows=1)
+ TableModify
+ table: PUBLIC.T1_N1N2N3
+ fieldNames: [ROWCOUNT]
+ type: UPDATE
+ est: (rows=1)
+ TableScan
+ table: PUBLIC.T1_N1N2N3
+ predicate: =(ID, 42)
+ fieldNames: [ID, C1, C2, EXPR$0]
+ projection: [ID, C1, C2, 100]
+ est: (rows=1)
+---
+# Pruning in DELETE is applied to both nodes.
+N1
+DELETE FROM t1_n1n2n3 WHERE id = 42
+---
+Fragment#2 root
+ distribution: single
+ executionNodes: [N1]
+ exchangeSourceNodes: {3=[N3]}
+ colocationGroup[-1]: {nodes=[N1], sourceIds=[-1, 3], assignments={},
partitionsWithConsistencyTokens={N1=[]}}
+ colocationGroup[3]: {nodes=[N1], sourceIds=[-1, 3], assignments={},
partitionsWithConsistencyTokens={N1=[]}}
+ tree:
+ Project
+ fieldNames: [ROWCOUNT]
+ projection: [CAST($f0):BIGINT NOT NULL]
+ est: (rows=1)
+ ColocatedHashAggregate
+ fieldNames: [$f0]
+ group: []
+ aggregation: [$SUM0(ROWCOUNT)]
+ est: (rows=1)
+ Receiver
+ fieldNames: [ROWCOUNT]
+ sourceFragmentId: 3
+ est: (rows=1)
+
+Fragment#3
+ distribution: table PUBLIC.T1_N1N2N3 in zone ZONE_1
+ executionNodes: [N3]
+ targetNodes: [N1]
+ colocationGroup[0]: {nodes=[N3], sourceIds=[0], assignments={part_2=N3:3},
partitionsWithConsistencyTokens={N3=[part_2:3]}}
+ colocationGroup[1]: {nodes=[N3], sourceIds=[1], assignments={part_2=N3:3},
partitionsWithConsistencyTokens={N3=[part_2:3]}}
+ partitions: [T1_N1N2N3=[N3={2}]]
+ tree:
+ Sender
+ distribution: single
+ targetFragmentId: 2
+ est: (rows=1)
+ TableModify
+ table: PUBLIC.T1_N1N2N3
+ fieldNames: [ROWCOUNT]
+ type: DELETE
+ est: (rows=1)
+ TableScan
+ table: PUBLIC.T1_N1N2N3
+ predicate: =(ID, 42)
+ fieldNames: [ID]
+ est: (rows=1)
+---