This is an automated email from the ASF dual-hosted git repository.

korlov pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/ignite-3.git


The following commit(s) were added to refs/heads/main by this push:
     new a0ec2ec341e IGNITE-28453 Avoid index key materialization during index 
row verification (#7932)
a0ec2ec341e is described below

commit a0ec2ec341eeddff67d87fb0a72e8c76a9dc3f91
Author: korlov42 <[email protected]>
AuthorDate: Tue Apr 7 16:17:07 2026 +0300

    IGNITE-28453 Avoid index key materialization during index row verification 
(#7932)
---
 .../internal/binarytuple/BinaryTuplePrinter.java   | 123 ++++++++++++++++
 .../binarytuple/BinaryTuplePrinterTest.java        | 147 +++++++++++++++++++
 .../benchmark/SqlIndexScanBenchmarkV2.java         | 154 ++++++++++++++++++++
 .../ignite/internal/schema/BinaryRowConverter.java |  75 ++++++++++
 .../ignite/internal/schema/ColumnsExtractor.java   |  14 ++
 .../benchmarks/BinaryRowConverterBenchmark.java    | 156 +++++++++++++++++++++
 .../internal/schema/BinaryRowConverterTest.java    | 141 +++++++++++++++++++
 .../index/TableRowToIndexKeyConverter.java         |   5 +
 .../distributed/index/VersionedConverter.java      |   5 +
 .../replicator/PartitionReplicaListener.java       |   4 +-
 10 files changed, 821 insertions(+), 3 deletions(-)

diff --git 
a/modules/binary-tuple/src/main/java/org/apache/ignite/internal/binarytuple/BinaryTuplePrinter.java
 
b/modules/binary-tuple/src/main/java/org/apache/ignite/internal/binarytuple/BinaryTuplePrinter.java
new file mode 100644
index 00000000000..fc3875d3a10
--- /dev/null
+++ 
b/modules/binary-tuple/src/main/java/org/apache/ignite/internal/binarytuple/BinaryTuplePrinter.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.binarytuple;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import org.apache.ignite.internal.type.DecimalNativeType;
+import org.apache.ignite.internal.type.NativeType;
+import org.apache.ignite.internal.type.StructNativeType;
+import org.apache.ignite.internal.util.StringUtils;
+
+/**
+ * Produces a human-readable string representation of a {@link BinaryTuple} 
given a {@link StructNativeType} that describes the row schema.
+ *
+ * <p>Intended for debugging and logging purposes only.
+ */
+public class BinaryTuplePrinter {
+    private final StructNativeType schema;
+
+    /** Constructor. */
+    public BinaryTuplePrinter(StructNativeType schema) {
+        this.schema = schema;
+    }
+
+    /**
+     * Renders the given tuple as a string.
+     *
+     * @param tuple Binary tuple.
+     * @return Human-readable representation.
+     */
+    public String print(BinaryTuple tuple) {
+        return printTuple(tuple);
+    }
+
+    /**
+     * Renders the given tuple as a string.
+     *
+     * @param tuple Byte buffer representing the binary tuple.
+     * @return Human-readable representation.
+     */
+    public String print(ByteBuffer tuple) {
+        return printTuple(new BinaryTupleReader(schema.fieldsCount(), tuple));
+    }
+
+    private String printTuple(BinaryTupleReader reader) {
+        List<StructNativeType.Field> fields = schema.fields();
+
+        StringBuilder sb = new StringBuilder("BinaryTuple[");
+
+        for (int i = 0; i < fields.size(); i++) {
+            if (i > 0) {
+                sb.append(", ");
+            }
+
+            StructNativeType.Field field = fields.get(i);
+            sb.append(field.name()).append('=');
+
+            if (reader.hasNullValue(i)) {
+                sb.append("null");
+            } else {
+                sb.append(readValue(reader, i, field.type()));
+            }
+        }
+
+        return sb.append(']').toString();
+    }
+
+    private static Object readValue(BinaryTupleReader reader, int index, 
NativeType type) {
+        switch (type.spec()) {
+            case BOOLEAN:
+                return reader.booleanValue(index);
+            case INT8:
+                return reader.byteValue(index);
+            case INT16:
+                return reader.shortValue(index);
+            case INT32:
+                return reader.intValue(index);
+            case INT64:
+                return reader.longValue(index);
+            case FLOAT:
+                return reader.floatValue(index);
+            case DOUBLE:
+                return reader.doubleValue(index);
+            case DECIMAL:
+                return reader.decimalValue(index, ((DecimalNativeType) 
type).scale());
+            case UUID:
+                return reader.uuidValue(index);
+            case STRING:
+                return "'" + reader.stringValue(index) + "'";
+            case BYTE_ARRAY:
+                return StringUtils.toHexString(reader.bytesValue(index));
+            case DATE:
+                return reader.dateValue(index);
+            case TIME:
+                return reader.timeValue(index);
+            case DATETIME:
+                return reader.dateTimeValue(index);
+            case TIMESTAMP:
+                return reader.timestampValue(index);
+            case DURATION:
+                return reader.durationValue(index);
+            case PERIOD:
+                return reader.periodValue(index);
+            default:
+                return "?(" + type.spec() + ")";
+        }
+    }
+}
diff --git 
a/modules/binary-tuple/src/test/java/org/apache/ignite/internal/binarytuple/BinaryTuplePrinterTest.java
 
b/modules/binary-tuple/src/test/java/org/apache/ignite/internal/binarytuple/BinaryTuplePrinterTest.java
new file mode 100644
index 00000000000..5af2a34c85a
--- /dev/null
+++ 
b/modules/binary-tuple/src/test/java/org/apache/ignite/internal/binarytuple/BinaryTuplePrinterTest.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.binarytuple;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import java.nio.ByteBuffer;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import org.apache.ignite.internal.type.NativeTypes;
+import org.apache.ignite.internal.type.StructNativeType;
+import org.apache.ignite.internal.util.StringUtils;
+import org.junit.jupiter.api.Test;
+
+/** Tests for {@link BinaryTuplePrinter}. */
+class BinaryTuplePrinterTest {
+
+    @Test
+    void printScalarFields() {
+        StructNativeType schema = NativeTypes.structBuilder()
+                .addField("id", NativeTypes.INT32, false)
+                .addField("score", NativeTypes.INT64, false)
+                .addField("flag", NativeTypes.BOOLEAN, false)
+                .build();
+
+        BinaryTuple tuple = new BinaryTuple(3,
+                new BinaryTupleBuilder(3)
+                        .appendInt(7)
+                        .appendLong(42L)
+                        .appendBoolean(true)
+                        .build()
+        );
+
+        String result = new BinaryTuplePrinter(schema).print(tuple);
+
+        assertEquals("BinaryTuple[id=7, score=42, flag=true]", result);
+    }
+
+    @Test
+    void printStringIsQuoted() {
+        StructNativeType schema = NativeTypes.structBuilder()
+                .addField("name", NativeTypes.STRING, false)
+                .build();
+
+        BinaryTuple tuple = new BinaryTuple(1,
+                new BinaryTupleBuilder(1).appendString("hello world").build()
+        );
+
+        assertEquals("BinaryTuple[name='hello world']", new 
BinaryTuplePrinter(schema).print(tuple));
+    }
+
+    @Test
+    void printNullFieldRendersAsNull() {
+        StructNativeType schema = NativeTypes.structBuilder()
+                .addField("id", NativeTypes.INT32, false)
+                .addField("name", NativeTypes.STRING, true)
+                .build();
+
+        BinaryTuple tuple = new BinaryTuple(2,
+                new BinaryTupleBuilder(2).appendInt(1).appendNull().build()
+        );
+
+        assertEquals("BinaryTuple[id=1, name=null]", new 
BinaryTuplePrinter(schema).print(tuple));
+    }
+
+    @Test
+    void printByteArrayRenderedAsHex() {
+        StructNativeType schema = NativeTypes.structBuilder()
+                .addField("data", NativeTypes.BYTES, false)
+                .build();
+
+        @SuppressWarnings("NumericCastThatLosesPrecision")
+        byte[] bytes = {0x01, (byte) 0xAB, (byte) 0xFF};
+
+        BinaryTuple tuple = new BinaryTuple(1,
+                new BinaryTupleBuilder(1).appendBytes(bytes).build()
+        );
+
+        String expected = "BinaryTuple[data=" + StringUtils.toHexString(bytes) 
+ "]";
+        assertEquals(expected, new BinaryTuplePrinter(schema).print(tuple));
+    }
+
+    @Test
+    void printDateTimeFields() {
+        LocalDate date = LocalDate.of(2024, 6, 15);
+        LocalTime time = LocalTime.of(10, 30, 0);
+        LocalDateTime dateTime = LocalDateTime.of(date, time);
+
+        StructNativeType schema = NativeTypes.structBuilder()
+                .addField("d", NativeTypes.DATE, false)
+                .addField("t", NativeTypes.time(0), false)
+                .addField("dt", NativeTypes.datetime(0), false)
+                .build();
+
+        BinaryTuple tuple = new BinaryTuple(3,
+                new BinaryTupleBuilder(3)
+                        .appendDate(date)
+                        .appendTime(time)
+                        .appendDateTime(dateTime)
+                        .build()
+        );
+
+        assertEquals(
+                "BinaryTuple[d=" + date + ", t=" + time + ", dt=" + dateTime + 
"]",
+                new BinaryTuplePrinter(schema).print(tuple)
+        );
+    }
+
+    @Test
+    void printByteBufferOverloadMatchesBinaryTupleOverload() {
+        StructNativeType schema = NativeTypes.structBuilder()
+                .addField("x", NativeTypes.INT32, false)
+                .addField("y", NativeTypes.STRING, true)
+                .build();
+
+        ByteBuffer buf = new 
BinaryTupleBuilder(2).appendInt(99).appendString("test").build();
+        BinaryTuple tuple = new BinaryTuple(2, buf);
+
+        BinaryTuplePrinter printer = new BinaryTuplePrinter(schema);
+        assertEquals(printer.print(tuple), printer.print(buf));
+    }
+
+    @Test
+    void printEmptySchemaProducesEmptyBrackets() {
+        StructNativeType schema = NativeTypes.structBuilder().build();
+
+        BinaryTuple tuple = new BinaryTuple(0, new 
BinaryTupleBuilder(0).build());
+
+        assertEquals("BinaryTuple[]", new 
BinaryTuplePrinter(schema).print(tuple));
+    }
+}
diff --git 
a/modules/runner/src/integrationTest/java/org/apache/ignite/internal/benchmark/SqlIndexScanBenchmarkV2.java
 
b/modules/runner/src/integrationTest/java/org/apache/ignite/internal/benchmark/SqlIndexScanBenchmarkV2.java
new file mode 100644
index 00000000000..3c2d36b2645
--- /dev/null
+++ 
b/modules/runner/src/integrationTest/java/org/apache/ignite/internal/benchmark/SqlIndexScanBenchmarkV2.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.benchmark;
+
+import java.nio.file.Files;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.IntStream;
+import org.apache.ignite.internal.util.SubscriptionUtils;
+import org.apache.ignite.sql.IgniteSql;
+import org.apache.ignite.table.DataStreamerItem;
+import org.apache.ignite.table.DataStreamerOptions;
+import org.apache.ignite.table.Tuple;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Threads;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+import org.openjdk.jmh.runner.Runner;
+import org.openjdk.jmh.runner.RunnerException;
+import org.openjdk.jmh.runner.options.Options;
+import org.openjdk.jmh.runner.options.OptionsBuilder;
+
+/**
+ * Benchmark that compares sequential scanning of index against full table 
scan.
+ */
+@State(Scope.Benchmark)
+@Fork(1)
+@Threads(1)
+@Warmup(iterations = 10, time = 2)
+@Measurement(iterations = 20, time = 2)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@SuppressWarnings({"WeakerAccess", "unused"})
+public class SqlIndexScanBenchmarkV2 extends AbstractMultiNodeBenchmark {
+    /*
+        By default, cluster's work directory will be created as a temporary 
folder. This implies,
+        that all data generated by benchmark will be cleared automatically. 
However, this also implies
+        that cluster will be recreated on EVERY RUN. To initialize cluster 
once and then reuse it state
+        override `AbstractMultiNodeBenchmark.workDir()` method. Don't forget 
to clear that directory
+        afterwards.
+     */
+
+    private static final String DATASET_READY_MARK_FILE_NAME = "ready.txt";
+
+    private static final String QUERY =
+            "SELECT /*+ FORCE_INDEX(test_val_idx) */ t.* FROM test t WHERE val 
>= ? ORDER BY val LIMIT 100";
+
+    private static final int TABLE_SIZE = 1_500_000;
+
+    @Param("24")
+    private int partitions;
+
+    @Param({"128"})
+    private int prefixLength;
+
+    private IgniteSql sql;
+    private String valuePrefix;
+
+    /** Initializes a schema and fills tables with data. */
+    @Setup
+    public void setUp() throws Exception {
+        try {
+            sql = publicIgnite.sql();
+            valuePrefix = "a".repeat(prefixLength);
+
+            if 
(!Files.exists(workDir().resolve(DATASET_READY_MARK_FILE_NAME))) {
+                sql.executeScript(
+                        "CREATE TABLE test (id INT PRIMARY KEY, val VARCHAR);"
+                                + "CREATE INDEX test_val_idx ON test(val);"
+                );
+
+                CompletableFuture<?> result = 
publicIgnite.tables().table("test")
+                        .recordView()
+                        .streamData(SubscriptionUtils.fromIterable(() -> 
IntStream.range(0, TABLE_SIZE)
+                                .mapToObj(i -> 
DataStreamerItem.of(Tuple.create()
+                                        .set("id", i)
+                                        .set("val", valuePrefix + i))
+                                ).iterator()), DataStreamerOptions.DEFAULT);
+
+                result.get(15, TimeUnit.MINUTES);
+
+                
Files.createFile(workDir().resolve(DATASET_READY_MARK_FILE_NAME));
+            }
+        } catch (Exception e) {
+            nodeTearDown();
+
+            throw e;
+        }
+    }
+
+    /** Measures performance of scan over an index. */
+    @Benchmark
+    public void test(Blackhole bh) {
+        try (var rs = sql.execute(QUERY, valuePrefix + 
ThreadLocalRandom.current().nextInt(TABLE_SIZE - 200))) {
+            while (rs.hasNext()) {
+                bh.consume(rs.next());
+            }
+        }
+    }
+
+    /**
+     * Benchmark's entry point.
+     */
+    public static void main(String[] args) throws RunnerException {
+        Options opt = new OptionsBuilder()
+                .addProfiler("gc")
+                .include(".*" + SqlIndexScanBenchmarkV2.class.getSimpleName() 
+ ".*")
+                .build();
+
+        new Runner(opt).run();
+    }
+
+    @Override
+    protected int nodes() {
+        return 3;
+    }
+
+    @Override
+    protected int partitionCount() {
+        return partitions;
+    }
+
+    @Override
+    protected void createTable(String tableName) {
+        // NO-OP
+    }
+}
+
+
diff --git 
a/modules/schema/src/main/java/org/apache/ignite/internal/schema/BinaryRowConverter.java
 
b/modules/schema/src/main/java/org/apache/ignite/internal/schema/BinaryRowConverter.java
index b7e27eb66a1..a9822b01d74 100644
--- 
a/modules/schema/src/main/java/org/apache/ignite/internal/schema/BinaryRowConverter.java
+++ 
b/modules/schema/src/main/java/org/apache/ignite/internal/schema/BinaryRowConverter.java
@@ -29,6 +29,8 @@ import 
org.apache.ignite.internal.binarytuple.BinaryTupleBuilder;
 import org.apache.ignite.internal.binarytuple.BinaryTupleFormatException;
 import org.apache.ignite.internal.binarytuple.BinaryTupleParser;
 import org.apache.ignite.internal.binarytuple.BinaryTupleParser.Sink;
+import org.apache.ignite.internal.binarytuple.BinaryTupleReader;
+import org.apache.ignite.internal.binarytuple.ByteBufferAccessor;
 import org.apache.ignite.internal.lang.InternalTuple;
 import org.apache.ignite.internal.schema.BinaryTupleSchema.Element;
 import org.jetbrains.annotations.Nullable;
@@ -53,6 +55,79 @@ public class BinaryRowConverter implements ColumnsExtractor {
         this.dstSchema = dstSchema;
     }
 
+    @Override
+    public boolean columnsMatch(BinaryRow tableRow, BinaryTuple indexColumns) {
+        assert srcSchema.convertible();
+
+        ByteBuffer tupleBuffer = tableRow.tupleSlice();
+        BinaryTupleReader rowReader = new BinaryTupleReader(
+                srcSchema.elementCount(), tupleBuffer, 
UnsafeByteBufferAccessor::new
+        );
+        ByteBufferAccessor rowAccessor = rowReader.accessor();
+        // Make sure UnsafeByteBufferAccessor is used.
+        BinaryTupleReader keyReader = new BinaryTupleReader(
+                dstSchema.elementCount(), indexColumns.byteBuffer(), 
UnsafeByteBufferAccessor::new
+        );
+        ByteBufferAccessor keyAccessor = keyReader.accessor();
+
+        for (int i = 0; i < dstSchema.elementCount(); i++) {
+            rowReader.seek(dstSchema.columnIndex(i));
+            keyReader.seek(i);
+
+            int rowElementBegin = rowReader.begin();
+            int keyElementBegin = keyReader.begin();
+            int rowElementLen = rowReader.end() - rowElementBegin;
+            int keyElementLen = keyReader.end() - keyElementBegin;
+
+            if (rowElementLen != keyElementLen) {
+                return false;
+            }
+
+            int probeSize = 8; // Must be aligned with `get` method used 
within the loop.
+            while (rowElementLen >= probeSize) {
+                if (rowAccessor.getLong(rowElementBegin) != 
keyAccessor.getLong(keyElementBegin)) {
+                    return false;
+                }
+
+                rowElementLen -= probeSize;
+                rowElementBegin += probeSize;
+                keyElementBegin += probeSize;
+            }
+
+            boolean res = true;
+            switch (rowElementLen) {
+                case 7:
+                    res = rowAccessor.get(rowElementBegin + 6) == 
keyAccessor.get(keyElementBegin + 6);
+                    // fallthrough
+                case 6:
+                    res = res && rowAccessor.get(rowElementBegin + 5) == 
keyAccessor.get(keyElementBegin + 5);
+                    // fallthrough
+                case 5:
+                    res = res && rowAccessor.get(rowElementBegin + 4) == 
keyAccessor.get(keyElementBegin + 4);
+                    // fallthrough
+                case 4:
+                    res = res && rowAccessor.getInt(rowElementBegin) == 
keyAccessor.getInt(keyElementBegin);
+                    break;
+                case 3:
+                    res = rowAccessor.get(rowElementBegin + 2) == 
keyAccessor.get(keyElementBegin + 2);
+                    // fallthrough
+                case 2:
+                    res = res && rowAccessor.getShort(rowElementBegin) == 
keyAccessor.getShort(keyElementBegin);
+                    break;
+                case 1:
+                    res = rowAccessor.get(rowElementBegin) == 
keyAccessor.get(keyElementBegin);
+                    // fallthrough
+                default: // NO-OP
+            }
+
+            if (!res) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
     /**
      * Convert a binary row to a binary tuple.
      *
diff --git 
a/modules/schema/src/main/java/org/apache/ignite/internal/schema/ColumnsExtractor.java
 
b/modules/schema/src/main/java/org/apache/ignite/internal/schema/ColumnsExtractor.java
index 15ae0df55bf..abced7dec47 100644
--- 
a/modules/schema/src/main/java/org/apache/ignite/internal/schema/ColumnsExtractor.java
+++ 
b/modules/schema/src/main/java/org/apache/ignite/internal/schema/ColumnsExtractor.java
@@ -31,4 +31,18 @@ public interface ColumnsExtractor {
      * @return Subset of columns, packed into a {@code BinaryTuple}.
      */
     BinaryTuple extractColumns(BinaryRow row);
+
+    /**
+     * Checks whether index columns match the given table row.
+     *
+     * <p>The default implementation extracts columns and compares the 
resulting byte buffer.
+     * Implementations may override this to avoid the allocation.
+     *
+     * @param tableRow Row with data from table.
+     * @param indexColumns Binary tuple representation of indexed columns.
+     * @return {@code true} if the index columns match the table row, {@code 
false} otherwise.
+     */
+    default boolean columnsMatch(BinaryRow tableRow, BinaryTuple indexColumns) 
{
+        return 
extractColumns(tableRow).byteBuffer().equals(indexColumns.byteBuffer());
+    }
 }
diff --git 
a/modules/schema/src/test/java/org/apache/ignite/internal/benchmarks/BinaryRowConverterBenchmark.java
 
b/modules/schema/src/test/java/org/apache/ignite/internal/benchmarks/BinaryRowConverterBenchmark.java
new file mode 100644
index 00000000000..e681243db18
--- /dev/null
+++ 
b/modules/schema/src/test/java/org/apache/ignite/internal/benchmarks/BinaryRowConverterBenchmark.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.benchmarks;
+
+import static java.util.Objects.requireNonNull;
+import static 
org.apache.ignite.internal.testframework.IgniteTestUtils.randomString;
+import static org.apache.ignite.internal.type.NativeTypes.INT64;
+
+import it.unimi.dsi.fastutil.ints.IntList;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import org.apache.ignite.internal.binarytuple.BinaryTuple;
+import org.apache.ignite.internal.schema.BinaryRow;
+import org.apache.ignite.internal.schema.BinaryRowConverter;
+import org.apache.ignite.internal.schema.Column;
+import org.apache.ignite.internal.schema.SchemaDescriptor;
+import org.apache.ignite.internal.schema.row.RowAssembler;
+import org.apache.ignite.internal.type.NativeTypes;
+import org.apache.ignite.internal.util.Pair;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+import org.openjdk.jmh.runner.Runner;
+import org.openjdk.jmh.runner.RunnerException;
+import org.openjdk.jmh.runner.options.Options;
+import org.openjdk.jmh.runner.options.OptionsBuilder;
+
+/** Benchmark to measure {@link BinaryRowConverter}. */
+@State(Scope.Benchmark)
+@Warmup(time = 1, iterations = 10, timeUnit = TimeUnit.SECONDS)
+@Measurement(time = 1, iterations = 60, timeUnit = TimeUnit.SECONDS)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@Fork(1)
+public class BinaryRowConverterBenchmark {
+    private BinaryRowConverter converter;
+
+    private int idx;
+    private final List<Pair<BinaryTuple, BinaryRow>> dataset = new 
ArrayList<>();
+
+    /** Initialize. */
+    @Setup
+    public void init() {
+        long seed = System.currentTimeMillis();
+
+        System.err.println("Seed: " + seed);
+
+        Random rnd = new Random(seed);
+
+        SchemaDescriptor schema = new SchemaDescriptor(
+                1,
+                List.of(
+                        new Column("ID", INT64, false),
+                        new Column("C1", INT64, true),
+                        new Column("LONG_COL", INT64, true),
+                        new Column("STRING_COL", NativeTypes.stringOf(256), 
true),
+                        new Column("C4", INT64, true),
+                        new Column("C5", INT64, true)
+                ),
+                IntList.of(0),
+                null
+        );
+
+        converter = BinaryRowConverter.columnsExtractor(
+                schema,
+                requireNonNull(schema.column("STRING_COL")).positionInRow(),
+                requireNonNull(schema.column("LONG_COL")).positionInRow()
+        );
+
+        int baseStringLength = 32;
+        for (int i = 0; i < 10; i++) {
+            BinaryRow row = new RowAssembler(schema, -1)
+                    .appendLong(rnd.nextLong()) // ID
+                    .appendLong(rnd.nextLong()) // C1
+                    .appendLong(rnd.nextLong()) // LONG_COL
+                    .appendString(randomString(rnd, baseStringLength + i)) // 
STRING_COL
+                    .appendLong(rnd.nextLong()) // C4
+                    .appendLong(rnd.nextLong()) // C5
+                    .build();
+
+            BinaryTuple indexColumns = converter.extractColumns(row);
+            dataset.add(new Pair<>(indexColumns, row));
+        }
+    }
+
+    /**
+     * Measure matching avoiding tuple materialization.
+     *
+     * @param bh Black hole.
+     */
+    @Benchmark
+    public void onlyMatch(Blackhole bh) {
+        Pair<BinaryTuple, BinaryRow> pair = dataset.get(nextIdx());
+
+        bh.consume(converter.columnsMatch(pair.getSecond(), pair.getFirst()));
+    }
+
+    /**
+     * Measure matching through tuple materialization.
+     *
+     * @param bh Black hole.
+     */
+    @Benchmark
+    public void extractAndMatch(Blackhole bh) {
+        Pair<BinaryTuple, BinaryRow> pair = dataset.get(nextIdx());
+
+        
bh.consume(converter.extractColumns(pair.getSecond()).byteBuffer().equals(pair.getFirst().byteBuffer()));
+    }
+
+    private int nextIdx() {
+        idx++;
+        if (idx >= dataset.size()) {
+            idx = 0;
+        }
+
+        return idx;
+    }
+
+    /**
+     * Benchmark run method.
+     */
+    public static void main(String[] args) throws RunnerException {
+        Options opt = new OptionsBuilder()
+                .addProfiler("gc")
+                .include(BinaryRowConverterBenchmark.class.getSimpleName())
+                .build();
+
+        new Runner(opt).run();
+    }
+
+}
diff --git 
a/modules/schema/src/test/java/org/apache/ignite/internal/schema/BinaryRowConverterTest.java
 
b/modules/schema/src/test/java/org/apache/ignite/internal/schema/BinaryRowConverterTest.java
index 19dc07c2f41..42255b22629 100644
--- 
a/modules/schema/src/test/java/org/apache/ignite/internal/schema/BinaryRowConverterTest.java
+++ 
b/modules/schema/src/test/java/org/apache/ignite/internal/schema/BinaryRowConverterTest.java
@@ -18,6 +18,8 @@
 package org.apache.ignite.internal.schema;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.nio.ByteBuffer;
 import java.time.LocalDate;
@@ -76,6 +78,7 @@ public class BinaryRowConverterTest extends 
BaseIgniteAbstractTest {
 
             assertEquals(col1, dstSchema.value(tuple, 0));
             assertEquals(col2, dstSchema.value(tuple, 1));
+            assertTrue(columnsExtractor.columnsMatch(binaryRow, tuple));
         }
 
         {
@@ -85,6 +88,7 @@ public class BinaryRowConverterTest extends 
BaseIgniteAbstractTest {
             BinaryTuple tuple = columnsExtractor.extractColumns(binaryRow);
             assertEquals(col2, dstSchema.value(tuple, 0));
             assertEquals(col3, dstSchema.value(tuple, 1));
+            assertTrue(columnsExtractor.columnsMatch(binaryRow, tuple));
         }
 
         {
@@ -94,6 +98,143 @@ public class BinaryRowConverterTest extends 
BaseIgniteAbstractTest {
             BinaryTuple tuple = columnsExtractor.extractColumns(binaryRow);
             assertEquals(col2, dstSchema.value(tuple, 0));
             assertEquals(col4, dstSchema.value(tuple, 1));
+            assertTrue(columnsExtractor.columnsMatch(binaryRow, tuple));
+        }
+    }
+
+    @Test
+    public void columnsMatchReturnsTrueWhenExtractedTupleIsUsed() {
+        // For any row, columnsMatch(row, extractColumns(row)) must be true —
+        // this is the fundamental contract the method is built on.
+        List<Column> columnList = Arrays.asList(
+                new Column("C1".toUpperCase(Locale.ROOT), NativeTypes.INT32, 
false),
+                new Column("C2".toUpperCase(Locale.ROOT), NativeTypes.STRING, 
false),
+                new Column("C3".toUpperCase(Locale.ROOT), NativeTypes.INT64, 
false)
+        );
+        SchemaDescriptor schema = new SchemaDescriptor(1, columnList, 
List.of("C1"), null);
+
+        int col1 = random.nextInt();
+        String col2 = "hello-" + random.nextInt();
+        long col3 = random.nextLong();
+
+        ByteBuffer buffer = new BinaryTupleBuilder(3, 64)
+                .appendInt(col1)
+                .appendString(col2)
+                .appendLong(col3)
+                .build();
+        BinaryRow row = new BinaryRowImpl(schema.version(), buffer);
+
+        // Single column subset.
+        BinaryRowConverter singleCol = 
BinaryRowConverter.columnsExtractor(schema, 1);
+        assertTrue(singleCol.columnsMatch(row, singleCol.extractColumns(row)));
+
+        // Two-column subset.
+        BinaryRowConverter twoCol = 
BinaryRowConverter.columnsExtractor(schema, 0, 2);
+        assertTrue(twoCol.columnsMatch(row, twoCol.extractColumns(row)));
+
+        // All columns.
+        BinaryRowConverter allCols = 
BinaryRowConverter.columnsExtractor(schema, 0, 1, 2);
+        assertTrue(allCols.columnsMatch(row, allCols.extractColumns(row)));
+    }
+
+    @Test
+    public void columnsMatchReturnsFalseWhenValueDiffers() {
+        List<Column> columnList = Arrays.asList(
+                new Column("C1".toUpperCase(Locale.ROOT), NativeTypes.INT32, 
false),
+                new Column("C2".toUpperCase(Locale.ROOT), NativeTypes.INT64, 
false),
+                new Column("C3".toUpperCase(Locale.ROOT), NativeTypes.STRING, 
false)
+        );
+        SchemaDescriptor schema = new SchemaDescriptor(1, columnList, 
List.of("C1"), null);
+
+        int col1 = 42;
+        long col2 = 100L;
+        String col3 = "abc";
+
+        ByteBuffer buffer = new BinaryTupleBuilder(3, 32)
+                .appendInt(col1)
+                .appendLong(col2)
+                .appendString(col3)
+                .build();
+        BinaryRow row = new BinaryRowImpl(schema.version(), buffer);
+
+        // Build a tuple where C1 has a different integer value.
+        BinaryRowConverter extractor = 
BinaryRowConverter.columnsExtractor(schema, 0);
+        BinaryTuple differentInt = new BinaryTuple(1,
+                new BinaryTupleBuilder(1).appendInt(col1 + 1).build());
+        assertFalse(extractor.columnsMatch(row, differentInt));
+
+        // Build a tuple where C2 has a different long value.
+        BinaryRowConverter extractor2 = 
BinaryRowConverter.columnsExtractor(schema, 1);
+        BinaryTuple differentLong = new BinaryTuple(1,
+                new BinaryTupleBuilder(1).appendLong(col2 + 1).build());
+        assertFalse(extractor2.columnsMatch(row, differentLong));
+
+        // Build a tuple where C3 has a different string value.
+        BinaryRowConverter extractor3 = 
BinaryRowConverter.columnsExtractor(schema, 2);
+        BinaryTuple differentStr = new BinaryTuple(1,
+                new BinaryTupleBuilder(1).appendString(col3 + "x").build());
+        assertFalse(extractor3.columnsMatch(row, differentStr));
+    }
+
+    @Test
+    public void columnsMatchHandlesNullValues() {
+        List<Column> columnList = Arrays.asList(
+                new Column("ID".toUpperCase(Locale.ROOT), NativeTypes.INT32, 
false),
+                new Column("VAL".toUpperCase(Locale.ROOT), NativeTypes.INT32, 
true)
+        );
+        SchemaDescriptor schema = new SchemaDescriptor(1, columnList, 
List.of("ID"), null);
+
+        // Row with a non-null key and a null value for the nullable VAL column.
+        ByteBuffer buffer = new BinaryTupleBuilder(2)
+                .appendInt(1)
+                .appendNull()
+                .build();
+        BinaryRow row = new BinaryRowImpl(schema.version(), buffer);
+
+        // A tuple also carrying null for the same column → match.
+        BinaryRowConverter extractor = 
BinaryRowConverter.columnsExtractor(schema, 1);
+        BinaryTuple nullTuple = new BinaryTuple(1, new 
BinaryTupleBuilder(1).appendNull().build());
+        assertTrue(extractor.columnsMatch(row, nullTuple));
+
+        // A tuple carrying a non-null value → no match.
+        BinaryTuple nonNullTuple = new BinaryTuple(1,
+                new BinaryTupleBuilder(1).appendInt(0).build());
+        assertFalse(extractor.columnsMatch(row, nonNullTuple));
+
+        // Row with a non-null value, tuple with null → no match.
+        ByteBuffer buffer2 = new BinaryTupleBuilder(2, 16)
+                .appendInt(7)
+                .appendString("hello")
+                .build();
+        BinaryRow nonNullRow = new BinaryRowImpl(schema.version(), buffer2);
+        assertFalse(extractor.columnsMatch(nonNullRow, nullTuple));
+    }
+
+    @Test
+    public void columnsMatchAndExtractColumnsAreConsistent() {
+        // Verify that for any randomly generated row, the result of 
extractColumns
+        // always satisfies columnsMatch — i.e., the two methods agree on 
equality.
+        List<Column> columnList = Arrays.asList(
+                new Column("C1".toUpperCase(Locale.ROOT), NativeTypes.INT32, 
false),
+                new Column("C2".toUpperCase(Locale.ROOT), NativeTypes.INT64, 
false),
+                new Column("C3".toUpperCase(Locale.ROOT), NativeTypes.STRING, 
false),
+                new Column("C4".toUpperCase(Locale.ROOT), NativeTypes.DATE, 
false)
+        );
+        SchemaDescriptor schema = new SchemaDescriptor(1, columnList, 
List.of("C1"), null);
+        BinaryRowConverter extractor = 
BinaryRowConverter.columnsExtractor(schema, 1, 3);
+
+        for (int i = 0; i < 20; i++) {
+            ByteBuffer buffer = new BinaryTupleBuilder(4, 64)
+                    .appendInt(random.nextInt())
+                    .appendLong(random.nextLong())
+                    .appendString(String.valueOf(random.nextInt()))
+                    .appendDate(LocalDate.ofEpochDay(random.nextInt(10000)))
+                    .build();
+            BinaryRow row = new BinaryRowImpl(schema.version(), buffer);
+
+            BinaryTuple extracted = extractor.extractColumns(row);
+            assertTrue(extractor.columnsMatch(row, extracted),
+                    "columnsMatch must return true for a tuple produced by 
extractColumns");
         }
     }
 
diff --git 
a/modules/table/src/main/java/org/apache/ignite/internal/table/distributed/index/TableRowToIndexKeyConverter.java
 
b/modules/table/src/main/java/org/apache/ignite/internal/table/distributed/index/TableRowToIndexKeyConverter.java
index ded1f73e331..7522f5db305 100644
--- 
a/modules/table/src/main/java/org/apache/ignite/internal/table/distributed/index/TableRowToIndexKeyConverter.java
+++ 
b/modules/table/src/main/java/org/apache/ignite/internal/table/distributed/index/TableRowToIndexKeyConverter.java
@@ -45,6 +45,11 @@ class TableRowToIndexKeyConverter implements 
ColumnsExtractor {
         return converter(row).extractColumns(row);
     }
 
+    @Override
+    public boolean columnsMatch(BinaryRow tableRow, BinaryTuple indexColumns) {
+        return converter(tableRow).columnsMatch(tableRow, indexColumns);
+    }
+
     private ColumnsExtractor converter(BinaryRow row) {
         int schemaVersion = row.schemaVersion();
 
diff --git 
a/modules/table/src/main/java/org/apache/ignite/internal/table/distributed/index/VersionedConverter.java
 
b/modules/table/src/main/java/org/apache/ignite/internal/table/distributed/index/VersionedConverter.java
index 717fd28195a..a47f937d840 100644
--- 
a/modules/table/src/main/java/org/apache/ignite/internal/table/distributed/index/VersionedConverter.java
+++ 
b/modules/table/src/main/java/org/apache/ignite/internal/table/distributed/index/VersionedConverter.java
@@ -40,6 +40,11 @@ class VersionedConverter implements ColumnsExtractor {
         return delegate.extractColumns(row);
     }
 
+    @Override
+    public boolean columnsMatch(BinaryRow tableRow, BinaryTuple indexColumns) {
+        return delegate.columnsMatch(tableRow, indexColumns);
+    }
+
     int version() {
         return version;
     }
diff --git 
a/modules/table/src/main/java/org/apache/ignite/internal/table/distributed/replicator/PartitionReplicaListener.java
 
b/modules/table/src/main/java/org/apache/ignite/internal/table/distributed/replicator/PartitionReplicaListener.java
index 17420561b21..0f816b845b0 100644
--- 
a/modules/table/src/main/java/org/apache/ignite/internal/table/distributed/replicator/PartitionReplicaListener.java
+++ 
b/modules/table/src/main/java/org/apache/ignite/internal/table/distributed/replicator/PartitionReplicaListener.java
@@ -1448,9 +1448,7 @@ public class PartitionReplicaListener implements 
ReplicaTableProcessor {
      * @return {@code true} if index row matches the binary row, {@code false} 
otherwise.
      */
     private static boolean indexRowMatches(IndexRow indexRow, BinaryRow 
binaryRow, TableSchemaAwareIndexStorage schemaAwareIndexStorage) {
-        BinaryTuple actualIndexRow = 
schemaAwareIndexStorage.indexRowResolver().extractColumns(binaryRow);
-
-        return 
indexRow.indexColumns().byteBuffer().equals(actualIndexRow.byteBuffer());
+        return 
schemaAwareIndexStorage.indexRowResolver().columnsMatch(binaryRow, 
indexRow.indexColumns());
     }
 
     private CompletableFuture<Void> continueIndexLookup(


Reply via email to