This is an automated email from the ASF dual-hosted git repository.

korlov pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/ignite-3.git


The following commit(s) were added to refs/heads/main by this push:
     new 23be8976135 IGNITE-25551 Sql. Explain. Improve test coverage of sender and receiver (#6052)
23be8976135 is described below

commit 23be89761354ffbd5a2121241ce2970ec56a3233
Author: Max Zhuravkov <[email protected]>
AuthorDate: Thu Jun 19 14:21:26 2025 +0300

    IGNITE-25551 Sql. Explain. Improve test coverage of sender and receiver (#6052)
---
 .../sql/group1/explain/mapping.test                | 156 ++++++++++++++++++++-
 1 file changed, 155 insertions(+), 1 deletion(-)

diff --git a/modules/sql-engine/src/integrationTest/sql/group1/explain/mapping.test b/modules/sql-engine/src/integrationTest/sql/group1/explain/mapping.test
index 8eb3c14b3f8..9c7835205d9 100644
--- a/modules/sql-engine/src/integrationTest/sql/group1/explain/mapping.test
+++ b/modules/sql-engine/src/integrationTest/sql/group1/explain/mapping.test
@@ -2,7 +2,10 @@
 # Mapped fragments need to be placed inside "----" divisors.
 
 statement ok
-CREATE TABLE test_table (c1 INT PRIMARY KEY, c2 INT, c3 INT);
+CREATE ZONE test_zone (PARTITIONS 7) STORAGE PROFILES ['default'];
+
+statement ok
+CREATE TABLE test_table (c1 INT PRIMARY KEY, c2 INT, c3 INT) ZONE test_zone;
 
 explain mapping
 SELECT /*+ DISABLE_RULE('MapReduceSortAggregateConverterRule',
@@ -38,6 +41,62 @@ Fragment#0 root
         est: (rows=1)
 ----
 
+explain mapping
+SELECT /*+ DISABLE_RULE('TableScanToKeyValueGetRule')*/ * FROM test_table WHERE c1 = 1
+----
+Fragment#0 root
+  distribution: single
+  executionNodes: [sqllogic0]
+  tree: 
+    Receiver
+        fieldNames: [C1, C2, C3]
+        sourceFragmentId: 1
+        est: (rows=1)
+
+Fragment#1
+  distribution: table PUBLIC.TEST_TABLE in zone TEST_ZONE
+  executionNodes: [sqllogic0]
+  partitions: [TEST_TABLE=[sqllogic0={2}]]
+  tree: 
+    Sender
+        distribution: single
+        targetFragmentId: 0
+        est: (rows=1)
+      TableScan
+          table: PUBLIC.TEST_TABLE
+          predicate: =(C1, 1)
+          fieldNames: [C1, C2, C3]
+          est: (rows=1)
+----
+
+explain mapping
+SELECT * FROM test_table
+----
+Fragment#0 root
+  distribution: single
+  executionNodes: [sqllogic0]
+  tree: 
+    Receiver
+        fieldNames: [C1, C2, C3]
+        sourceFragmentId: 1
+        est: (rows=1)
+
+Fragment#1
+  distribution: table PUBLIC.TEST_TABLE in zone TEST_ZONE
+  executionNodes: [sqllogic1, sqllogic0]
+  partitions: [TEST_TABLE=[sqllogic1={1, 3, 4, 5, 6}, sqllogic0={0, 2}]]
+  tree: 
+    Sender
+        distribution: single
+        targetFragmentId: 0
+        est: (rows=1)
+      TableScan
+          table: PUBLIC.TEST_TABLE
+          fieldNames: [C1, C2, C3]
+          est: (rows=1)
+----
+
+
 explain mapping
 SELECT COUNT(*) FROM test_table
 ----
@@ -79,3 +138,98 @@ Fragment#0 root
         tuples: [[1], [2]]
         est: (rows=2)
 ----
+
+explain mapping
+SELECT * FROM test_table, system.tables WHERE c1 = table_id
+----
+Fragment#0 root
+  distribution: single
+  executionNodes: [sqllogic0]
+  tree: 
+    Project
+        fieldNames: [C1, C2, C3, SCHEMA_NAME, TABLE_NAME, TABLE_ID, TABLE_PK_INDEX_ID, ZONE_NAME, STORAGE_PROFILE, TABLE_COLOCATION_COLUMNS, SCHEMA_ID, ZONE_ID, SCHEMA, NAME, ID, PK_INDEX_ID, COLOCATION_KEY_INDEX, ZONE]
+        projection: [C1, C2, C3, SCHEMA_NAME, TABLE_NAME, TABLE_ID, TABLE_PK_INDEX_ID, ZONE_NAME, STORAGE_PROFILE, TABLE_COLOCATION_COLUMNS, SCHEMA_ID, ZONE_ID, SCHEMA, NAME, ID, PK_INDEX_ID, COLOCATION_KEY_INDEX, ZONE]
+        est: (rows=15)
+      HashJoin
+          predicate: =(C1, TABLE_ID)
+          type: inner
+          est: (rows=15)
+        SystemViewScan
+            table: SYSTEM.TABLES
+            fieldNames: [SCHEMA_NAME, TABLE_NAME, TABLE_ID, TABLE_PK_INDEX_ID, ZONE_NAME, STORAGE_PROFILE, TABLE_COLOCATION_COLUMNS, SCHEMA_ID, ZONE_ID, SCHEMA, NAME, ID, PK_INDEX_ID, COLOCATION_KEY_INDEX, ZONE]
+            est: (rows=100)
+        Receiver
+            fieldNames: [C1, C2, C3]
+            sourceFragmentId: 2
+            est: (rows=1)
+
+Fragment#2
+  distribution: table PUBLIC.TEST_TABLE in zone TEST_ZONE
+  executionNodes: [sqllogic1, sqllogic0]
+  partitions: [TEST_TABLE=[sqllogic1={1, 3, 4, 5, 6}, sqllogic0={0, 2}]]
+  tree: 
+    Sender
+        distribution: single
+        targetFragmentId: 0
+        est: (rows=1)
+      TableScan
+          table: PUBLIC.TEST_TABLE
+          fieldNames: [C1, C2, C3]
+          est: (rows=1)
+----
+
+statement ok
+CREATE ZONE test_zone2 (PARTITIONS 3) STORAGE PROFILES ['default'];
+
+statement ok
+CREATE TABLE test_table2 (c1 INT, c2 INT, c3 INT PRIMARY KEY) ZONE test_zone2;
+
+explain mapping
+SELECT * FROM test_table t1, test_table2 t2 WHERE t1.c1 = t2.c2
+----
+Fragment#0 root
+  distribution: single
+  executionNodes: [sqllogic0]
+  tree: 
+    HashJoin
+        predicate: =(C1, C2$0)
+        fieldNames: [C1, C2, C3, C1$0, C2$0, C3$0]
+        type: inner
+        est: (rows=1)
+      Receiver
+          fieldNames: [C1, C2, C3]
+          sourceFragmentId: 1
+          est: (rows=1)
+      Receiver
+          fieldNames: [C1, C2, C3]
+          sourceFragmentId: 2
+          est: (rows=1)
+
+Fragment#1
+  distribution: table PUBLIC.TEST_TABLE in zone TEST_ZONE
+  executionNodes: [sqllogic1, sqllogic0]
+  partitions: [TEST_TABLE=[sqllogic1={1, 3, 4, 5, 6}, sqllogic0={0, 2}]]
+  tree: 
+    Sender
+        distribution: single
+        targetFragmentId: 0
+        est: (rows=1)
+      TableScan
+          table: PUBLIC.TEST_TABLE
+          fieldNames: [C1, C2, C3]
+          est: (rows=1)
+
+Fragment#2
+  distribution: table PUBLIC.TEST_TABLE2 in zone TEST_ZONE2
+  executionNodes: [sqllogic1, sqllogic0]
+  partitions: [TEST_TABLE2=[sqllogic1={1}, sqllogic0={0, 2}]]
+  tree: 
+    Sender
+        distribution: single
+        targetFragmentId: 0
+        est: (rows=1)
+      TableScan
+          table: PUBLIC.TEST_TABLE2
+          fieldNames: [C1, C2, C3]
+          est: (rows=1)
+----

Reply via email to