comphead commented on code in PR #3731:
URL: https://github.com/apache/datafusion-comet/pull/3731#discussion_r2968195638


##########
spark/src/main/java/org/apache/comet/CometShuffleBlockIterator.java:
##########
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.comet;
+
+import java.io.Closeable;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.channels.Channels;
+import java.nio.channels.ReadableByteChannel;
+
+/**
+ * Provides raw compressed shuffle blocks to native code via JNI.
+ *
+ * <p>Reads block headers (compressed length + field count) from a shuffle 
InputStream and loads the
+ * compressed body into a DirectByteBuffer. Native code pulls blocks by 
calling hasNext() and
+ * getBuffer().
+ *
+ * <p>The DirectByteBuffer returned by getBuffer() is only valid until the 
next hasNext() call.
+ * Native code must fully consume it (via read_ipc_compressed which allocates 
new memory for the
+ * decompressed data) before pulling the next block.
+ */
+public class CometShuffleBlockIterator implements Closeable {
+
+  private static final int INITIAL_BUFFER_SIZE = 128 * 1024;
+
+  private final ReadableByteChannel channel;
+  private final InputStream inputStream;
+  private final ByteBuffer headerBuf = 
ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN);
+  private ByteBuffer dataBuf = ByteBuffer.allocateDirect(INITIAL_BUFFER_SIZE);
+  private boolean closed = false;
+  private int currentBlockLength = 0;
+
+  public CometShuffleBlockIterator(InputStream in) {
+    this.inputStream = in;
+    this.channel = Channels.newChannel(in);
+  }
+
+  /**
+   * Reads the next block header and loads the compressed body into the 
internal buffer. Called by
+   * native code via JNI.
+   *
+   * <p>Header format: 8-byte compressedLength (includes field count but not 
itself) + 8-byte
+   * fieldCount (discarded, schema comes from protobuf).
+   *
+   * @return the compressed body length in bytes (codec prefix + compressed 
IPC), or -1 if EOF
+   */
+  public int hasNext() throws IOException {
+    if (closed) {
+      return -1;
+    }
+
+    // Read 16-byte header
+    headerBuf.clear();
+    while (headerBuf.hasRemaining()) {
+      int bytesRead = channel.read(headerBuf);
+      if (bytesRead < 0) {
+        if (headerBuf.position() == 0) {
+          close();
+          return -1;
+        }
+        throw new EOFException("Data corrupt: unexpected EOF while reading 
batch header");
+      }
+    }
+    headerBuf.flip();
+    long compressedLength = headerBuf.getLong();
+    // Field count discarded - schema determined by ShuffleScan protobuf fields
+    headerBuf.getLong();
+
+    // Subtract 8 because compressedLength includes the 8-byte field count we 
already read
+    long bytesToRead = compressedLength - 8;
+    if (bytesToRead > Integer.MAX_VALUE) {
+      throw new IllegalStateException(
+          "Native shuffle block size of "
+              + bytesToRead
+              + " exceeds maximum of "
+              + Integer.MAX_VALUE
+              + ". Try reducing shuffle batch size.");

Review Comment:
   Please include the name of the shuffle batch size configuration parameter in this error message, so users know exactly which setting to adjust.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to