wombatu-kun commented on code in PR #18403:
URL: https://github.com/apache/hudi/pull/18403#discussion_r3180518479


##########
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/execution/datasources/lance/SparkLanceReaderBase.scala:
##########
@@ -23,37 +23,47 @@ import org.apache.hudi.SparkAdapterSupport.sparkAdapter
 import org.apache.hudi.common.config.{HoodieReaderConfig, HoodieStorageConfig}
 import org.apache.hudi.common.schema.{HoodieSchema, HoodieSchemaType}
 import org.apache.hudi.common.util
-import org.apache.hudi.common.util.collection.ClosableIterator
+import org.apache.hudi.common.util.collection.{ClosableIterator, Pair => 
HoodiePair}
 import org.apache.hudi.internal.schema.InternalSchema
 import org.apache.hudi.io.memory.HoodieArrowAllocator
-import org.apache.hudi.io.storage.{BlobDescriptorTransform, 
LanceRecordIterator, VectorConversionUtils}
+import org.apache.hudi.io.storage.{BlobDescriptorTransform, 
LanceBatchIterator, LanceRecordIterator, VectorConversionUtils}
 import org.apache.hudi.storage.StorageConfiguration
 
+import org.apache.arrow.memory.BufferAllocator
+import org.apache.arrow.vector.FieldVector
+import org.apache.arrow.vector.ipc.ArrowReader
 import org.apache.hadoop.conf.Configuration
 import org.apache.parquet.schema.MessageType
 import org.apache.spark.TaskContext
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.{AttributeReference, 
JoinedRow, UnsafeProjection, UnsafeRow}
 import 
org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
 import org.apache.spark.sql.execution.datasources.{PartitionedFile, 
SparkColumnarFileReader, SparkSchemaTransformUtils}
+import org.apache.spark.sql.execution.vectorized.{OnHeapColumnVector, 
WritableColumnVector}
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.sources.Filter
-import org.apache.spark.sql.types.{ArrayType, DataType, MapType, StructField, 
StructType}
+import org.apache.spark.sql.types._
 import org.apache.spark.sql.util.LanceArrowUtils
+import org.apache.spark.sql.vectorized.{ColumnarBatch, ColumnVector}
 import org.lance.file.{BlobReadMode, FileReadOptions, LanceFileReader}
+import org.lance.spark.vectorized.LanceArrowColumnVector
 
-import java.io.IOException
+import java.io.{Closeable, IOException}
 
 import scala.collection.JavaConverters._
 
 /**
  * Reader for Lance files in Spark datasource.
- * Implements vectorized reading using LanceArrowColumnVector.
+ * Supports both row-based and columnar batch reading modes.
  *
- * @param enableVectorizedReader whether to use vectorized reading (currently 
always true for Lance)
+ * @param enableVectorizedReader when true, returns ColumnarBatch for 
vectorized processing;
+ *                               when false, returns InternalRow one by one
  */
 class SparkLanceReaderBase(enableVectorizedReader: Boolean) extends 
SparkColumnarFileReader {
 
+  /** Holds a pre-created all-null Arrow vector for a column missing from the 
file (schema evolution). */
+  private case class NullColumnEntry(colIndex: Int, columnVector: 
LanceArrowColumnVector, arrowVector: FieldVector)
+
   // Batch size for reading Lance files (number of rows per batch)

Review Comment:
   Renamed to `lanceColumnVector` in 104a52f2 — a more specific name than `sparkVector`, making clear that it is the Lance-Arrow wrapper rather than a generic Spark vector.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to