cloud-fan commented on a change in pull request #34575:
URL: https://github.com/apache/spark/pull/34575#discussion_r752960655



##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileScanRDD.scala
##########
@@ -103,6 +116,123 @@ class FileScanRDD(
         context.killTaskIfInterrupted()
         (currentIterator != null && currentIterator.hasNext) || nextIterator()
       }
+
+      ///////////////////////////
+      // FILE METADATA METHODS //
+      ///////////////////////////
+
+      // Used to join with an UnsafeRow; refreshed only when `currentFile` changes,
+      // since every record of a file shares identical metadata.
+      @volatile var metadataStructUnsafeRow: UnsafeRow = _
+      // Used to append to a generic InternalRow; refreshed only when `currentFile`
+      // changes, for the same reason as above.
+      @volatile var metadataStructGenericRow: Row = _
+
+      // Allocates a WritableColumnVector of `numRows` entries, choosing off-heap
+      // or on-heap storage according to `offHeapColumnVectorEnabled`.
+      private def createColumnVector(numRows: Int, dataType: DataType): WritableColumnVector =
+        if (offHeapColumnVectorEnabled) new OffHeapColumnVector(numRows, dataType)
+        else new OnHeapColumnVector(numRows, dataType)
+
+      /**
+       * Refreshes the cached metadata rows for the current file.
+       *
+       * For each partitioned file, the metadata columns of every record are exactly
+       * the same, so the metadata struct is rebuilt only when `currentFile` changes.
+       * Populates both `metadataStructUnsafeRow` (joined with unsafe rows) and
+       * `metadataStructGenericRow` (appended to generic internal rows).
+       */
+      private def updateMetadataStruct(): Unit =
+        if (metadataStruct.exists(_.sameRef(FILE_METADATA_COLUMNS))) {
+          val meta = metadataStruct.get
+          if (currentFile == null) {
+            // No file is open: expose empty placeholder rows.
+            metadataStructUnsafeRow = new UnsafeRow(1)
+            metadataStructGenericRow = new GenericRow(1)
+          } else {
+            assert(meta.dataType.isInstanceOf[StructType])
+            // Build a generic row with one value per requested metadata field.
+            metadataStructGenericRow = Row.fromSeq(
+              meta.dataType.asInstanceOf[StructType].names.map {
+                case FILE_PATH =>
+                  UTF8String.fromString(new File(currentFile.filePath).toString)
+                case FILE_NAME =>
+                  UTF8String.fromString(currentFile.filePath.split("/").last)
+                case FILE_SIZE => currentFile.fileSize
+                case FILE_MODIFICATION_TIME => currentFile.modificationTime
+                case other =>
+                  // Fail fast on an unknown field instead of silently emitting the
+                  // Scala `None` object, which is not a valid SQL value for a Row.
+                  throw new IllegalArgumentException(s"Unknown metadata field: $other")
+              }
+            )
+
+            // Convert the generic row into an UnsafeRow for cheap joining.
+            val converter = UnsafeProjection.create(Array(FILE_METADATA_COLUMNS.dataType))
+            metadataStructUnsafeRow = converter(
+              CatalystTypeConverters.convertToCatalyst(
+                Row.fromSeq(Seq(metadataStructGenericRow))).asInstanceOf[InternalRow])
+          }
+        }
+
+      /**
+       * Builds a WritableColumnVector containing the metadata struct for every row
+       * of the given columnar batch. All rows of a file share the same metadata
+       * values, so each child vector is filled with a single repeated value.
+       */
+      private def createMetadataStructColumnVector(
+          c: ColumnarBatch, meta: AttributeReference): WritableColumnVector = {
+        val columnVector = createColumnVector(c.numRows(), FILE_METADATA_COLUMNS.dataType)
+        // Encode through UTF8String so the bytes are always UTF-8; the original
+        // `String.getBytes` with no charset used the platform-default encoding.
+        val filePathBytes = UTF8String.fromString(new File(currentFile.filePath).toString).getBytes
+        val fileNameBytes = UTF8String.fromString(currentFile.filePath.split("/").last).getBytes
+
+        // Fill every row of a child vector with the same byte array; a tight
+        // while-loop for better performance.
+        def fillBytes(child: WritableColumnVector, bytes: Array[Byte]): Unit = {
+          var rowId = 0
+          while (rowId < c.numRows()) {
+            child.putByteArray(rowId, bytes)
+            rowId += 1
+          }
+        }
+
+        assert(meta.dataType.isInstanceOf[StructType])
+        meta.dataType.asInstanceOf[StructType].names.zipWithIndex.foreach { case (name, ind) =>
+          name match {
+            case FILE_PATH => fillBytes(columnVector.getChild(ind), filePathBytes)
+            case FILE_NAME => fillBytes(columnVector.getChild(ind), fileNameBytes)
+            case FILE_SIZE =>
+              columnVector.getChild(ind).putLongs(0, c.numRows(), currentFile.fileSize)
+            case FILE_MODIFICATION_TIME =>
+              columnVector.getChild(ind).putLongs(0, c.numRows(), currentFile.modificationTime)
+            case _ => // be exhaustive, won't happen
+          }
+        }
+        columnVector
+      }
+
+      /**
+       * Add metadata struct at the end of nextElement if needed.
+       * For different row implementations, use different methods to update 
and append.
+       */
+      private def addMetadataStructIfNeeded(nextElement: Object): Object =
+        if (metadataStruct.exists(_.sameRef(FILE_METADATA_COLUMNS))) {
+          val meta = metadataStruct.get
+          nextElement match {
+            case c: ColumnarBatch =>
+              val columnVectorArr = Array.tabulate(c.numCols())(c.column) ++
+                Array(createMetadataStructColumnVector(c, meta))
+              new ColumnarBatch(columnVectorArr, c.numRows())
+            case u: UnsafeRow =>
+              val joiner =
+                GenerateUnsafeRowJoiner.create(requiredSchema, 
Seq(meta).toStructType)
+              joiner.join(u, metadataStructUnsafeRow)
+            case i: InternalRow =>
+              InternalRow.fromSeq(i.toSeq(requiredSchema) ++ 
metadataStructGenericRow.toSeq)

Review comment:
       Shall we just create a `JoinedRow` here?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org

Reply via email to