cloud-fan commented on a change in pull request #34575:
URL: https://github.com/apache/spark/pull/34575#discussion_r773672049



##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileScanRDD.scala
##########
@@ -103,6 +115,101 @@ class FileScanRDD(
         context.killTaskIfInterrupted()
         (currentIterator != null && currentIterator.hasNext) || nextIterator()
       }
+
+      ///////////////////////////
+      // FILE METADATA METHODS //
+      ///////////////////////////
+
+      // a metadata internal row, will only be updated when the current file is changed
+      val metadataRow: InternalRow = new GenericInternalRow(metadataColumns.length)
+
+      // an unsafe projection to convert a joined internal row to an unsafe row
+      private lazy val projection = {
+        val joinedExpressions =
+          readDataSchema.fields.map(_.dataType) ++ metadataColumns.map(_.dataType)
+        UnsafeProjection.create(joinedExpressions)
+      }
+
+      /**
+       * For each partitioned file, the metadata columns for each record in the file
+       * are exactly the same. Only update the metadata row when `currentFile` changes.
+       */
+      private def updateMetadataRow(): Unit = {
+        if (metadataColumns.nonEmpty && currentFile != null) {
+          val path = new Path(currentFile.filePath)
+          metadataColumns.zipWithIndex.foreach { case (attr, i) =>
+            attr.name match {
+              case FILE_PATH => metadataRow.update(i, UTF8String.fromString(path.toString))
+              case FILE_NAME => metadataRow.update(i, UTF8String.fromString(path.getName))
+              case FILE_SIZE => metadataRow.update(i, currentFile.fileSize)
+              case FILE_MODIFICATION_TIME =>
+                // the modificationTime from the file is in milliseconds,
+                // while internally the TimestampType is stored in microseconds
+                metadataRow.update(i, currentFile.modificationTime * 1000L)
+            }
+          }
+        }
+      }
+
+      /**
+       * Create a writable column vector containing all required metadata columns.
+       */
+      private def createMetadataColumnVector(c: ColumnarBatch): Array[WritableColumnVector] = {
+        val path = new Path(currentFile.filePath)
+        val filePathBytes = path.toString.getBytes
+        val fileNameBytes = path.getName.getBytes
+        var rowId = 0
+        metadataColumns.map(_.name).map {

Review comment:
       Per-batch should be fine; it's OK to have some small overhead there.
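
       To make that concrete, here's a rough sketch (not the PR's exact code;
       the object and helper names are made up) of filling constant per-file
       metadata vectors once per ColumnarBatch with OnHeapColumnVector:

           import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector
           import org.apache.spark.sql.types.{LongType, StringType}

           object MetadataVectorSketch {
             // The file size is a single bulk put per batch.
             def fileSizeVector(numRows: Int, fileSize: Long): OnHeapColumnVector = {
               val vector = new OnHeapColumnVector(numRows, LongType)
               vector.putLongs(0, numRows, fileSize) // same constant for every row
               vector
             }

             // The file path needs a per-row loop; this loop is the "small
             // overhead" above, and it runs once per batch, not once per record.
             def filePathVector(numRows: Int, filePath: String): OnHeapColumnVector = {
               val vector = new OnHeapColumnVector(numRows, StringType)
               val bytes = filePath.getBytes("UTF-8")
               var rowId = 0
               while (rowId < numRows) {
                 vector.putByteArray(rowId, bytes)
                 rowId += 1
               }
               vector
             }
           }

       With the default columnar batch size (4096 rows for the Parquet
       reader), this is negligible next to decoding the batch itself.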
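
       For the row-based path, the `projection` above just concatenates the
       read data schema with the metadata column types. A self-contained
       sketch of that pattern, with made-up schemas and values:

           import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, JoinedRow, UnsafeProjection}
           import org.apache.spark.sql.types.{DataType, LongType, StringType}
           import org.apache.spark.unsafe.types.UTF8String

           // Hypothetical schemas: two data columns plus one metadata column.
           val dataTypes: Array[DataType] = Array(LongType, StringType)
           val metadataTypes: Array[DataType] = Array(StringType)
           val projection = UnsafeProjection.create(dataTypes ++ metadataTypes)

           // Join a data row with the per-file constant metadata row, then
           // convert the joined row to an UnsafeRow in one pass.
           val dataRow = new GenericInternalRow(Array[Any](42L, UTF8String.fromString("a")))
           val metaRow = new GenericInternalRow(Array[Any](UTF8String.fromString("/tmp/f0.parquet")))
           val unsafeRow = projection(new JoinedRow(dataRow, metaRow))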
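
       One more note on updateMetadataRow: TimestampType stores microseconds
       since the epoch, while Hadoop reports file modification times in
       milliseconds, hence the `* 1000L`. With made-up values:

           val modificationTimeMs = 1636934400000L          // file mtime in millis
           val timestampMicros = modificationTimeMs * 1000L // stored in the row
           assert(timestampMicros == 1636934400000000L)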




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


