jonvex commented on code in PR #10956:
URL: https://github.com/apache/hudi/pull/10956#discussion_r1565961863


##########
hudi-spark-datasource/hudi-spark3-common/src/main/scala/org/apache/spark/sql/execution/datasources/Spark3ParquetSchemaEvolutionUtils.scala:
##########
@@ -0,0 +1,194 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.spark.sql.execution.datasources
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.fs.Path
+import org.apache.hudi.client.utils.SparkInternalSchemaConverter
+import org.apache.hudi.common.fs.FSUtils
+import org.apache.hudi.common.util
+import org.apache.hudi.common.util.InternalSchemaCache
+import org.apache.hudi.common.util.StringUtils.isNullOrEmpty
+import org.apache.hudi.common.util.collection.Pair
+import org.apache.hudi.internal.schema.InternalSchema
+import org.apache.hudi.internal.schema.action.InternalSchemaMerger
+import org.apache.hudi.internal.schema.utils.{InternalSchemaUtils, SerDeHelper}
+import org.apache.parquet.hadoop.metadata.FileMetaData
+import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
+import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Cast, UnsafeProjection}
+import org.apache.spark.sql.execution.datasources.Spark3ParquetSchemaEvolutionUtils.pruneInternalSchema
+import org.apache.spark.sql.execution.datasources.parquet.{HoodieParquetFileFormatHelper, ParquetReadSupport}
+import org.apache.spark.sql.sources._
+import org.apache.spark.sql.types.{AtomicType, DataType, StructField, StructType}
+
+import scala.collection.convert.ImplicitConversions.`collection AsScalaIterable`
+
+abstract class Spark3ParquetSchemaEvolutionUtils(sharedConf: Configuration,
+                                                 filePath: Path,
+                                                 requiredSchema: StructType,
+                                                 partitionSchema: StructType) {
+  // Fetch internal schema
+  private lazy val internalSchemaStr: String = sharedConf.get(SparkInternalSchemaConverter.HOODIE_QUERY_SCHEMA)
+
+  private lazy val querySchemaOption: util.Option[InternalSchema] = pruneInternalSchema(internalSchemaStr, requiredSchema)
+
+  var shouldUseInternalSchema: Boolean = !isNullOrEmpty(internalSchemaStr) && querySchemaOption.isPresent
+
+  private lazy val tablePath: String = sharedConf.get(SparkInternalSchemaConverter.HOODIE_TABLE_PATH)
+  private lazy val fileSchema: InternalSchema = if (shouldUseInternalSchema) {
+    val commitInstantTime = FSUtils.getCommitTime(filePath.getName).toLong;
+    val validCommits = sharedConf.get(SparkInternalSchemaConverter.HOODIE_VALID_COMMITS_LIST)
+    InternalSchemaCache.getInternalSchemaByVersionId(commitInstantTime, tablePath, sharedConf, if (validCommits == null) "" else validCommits)
+  } else {
+    null
+  }
+
+  def rebuildFilterFromParquet(filter: Filter): Filter = {
+    rebuildFilterFromParquetHelper(filter, fileSchema, querySchemaOption.orElse(null))
+  }
+
+  private def rebuildFilterFromParquetHelper(oldFilter: Filter, fileSchema: InternalSchema, querySchema: InternalSchema): Filter = {
+    if (fileSchema == null || querySchema == null) {
+      oldFilter
+    } else {
+      oldFilter match {
+        case eq: EqualTo =>
+          val newAttribute = InternalSchemaUtils.reBuildFilterName(eq.attribute, fileSchema, querySchema)
+          if (newAttribute.isEmpty) AlwaysTrue else eq.copy(attribute = newAttribute)
+        case eqs: EqualNullSafe =>
+          val newAttribute = InternalSchemaUtils.reBuildFilterName(eqs.attribute, fileSchema, querySchema)
+          if (newAttribute.isEmpty) AlwaysTrue else eqs.copy(attribute = newAttribute)
+        case gt: GreaterThan =>
+          val newAttribute = InternalSchemaUtils.reBuildFilterName(gt.attribute, fileSchema, querySchema)
+          if (newAttribute.isEmpty) AlwaysTrue else gt.copy(attribute = newAttribute)
+        case gtr: GreaterThanOrEqual =>
+          val newAttribute = InternalSchemaUtils.reBuildFilterName(gtr.attribute, fileSchema, querySchema)
+          if (newAttribute.isEmpty) AlwaysTrue else gtr.copy(attribute = newAttribute)
+        case lt: LessThan =>
+          val newAttribute = InternalSchemaUtils.reBuildFilterName(lt.attribute, fileSchema, querySchema)
+          if (newAttribute.isEmpty) AlwaysTrue else lt.copy(attribute = newAttribute)
+        case lte: LessThanOrEqual =>
+          val newAttribute = InternalSchemaUtils.reBuildFilterName(lte.attribute, fileSchema, querySchema)
+          if (newAttribute.isEmpty) AlwaysTrue else lte.copy(attribute = newAttribute)
+        case i: In =>
+          val newAttribute = InternalSchemaUtils.reBuildFilterName(i.attribute, fileSchema, querySchema)
+          if (newAttribute.isEmpty) AlwaysTrue else i.copy(attribute = newAttribute)
+        case isn: IsNull =>
+          val newAttribute = InternalSchemaUtils.reBuildFilterName(isn.attribute, fileSchema, querySchema)
+          if (newAttribute.isEmpty) AlwaysTrue else isn.copy(attribute = newAttribute)
+        case isnn: IsNotNull =>
+          val newAttribute = InternalSchemaUtils.reBuildFilterName(isnn.attribute, fileSchema, querySchema)
+          if (newAttribute.isEmpty) AlwaysTrue else isnn.copy(attribute = newAttribute)
+        case And(left, right) =>
+          And(rebuildFilterFromParquetHelper(left, fileSchema, querySchema), rebuildFilterFromParquetHelper(right, fileSchema, querySchema))
+        case Or(left, right) =>
+          Or(rebuildFilterFromParquetHelper(left, fileSchema, querySchema), rebuildFilterFromParquetHelper(right, fileSchema, querySchema))
+        case Not(child) =>
+          Not(rebuildFilterFromParquetHelper(child, fileSchema, querySchema))
+        case ssw: StringStartsWith =>
+          val newAttribute = InternalSchemaUtils.reBuildFilterName(ssw.attribute, fileSchema, querySchema)
+          if (newAttribute.isEmpty) AlwaysTrue else ssw.copy(attribute = newAttribute)
+        case ses: StringEndsWith =>
+          val newAttribute = InternalSchemaUtils.reBuildFilterName(ses.attribute, fileSchema, querySchema)
+          if (newAttribute.isEmpty) AlwaysTrue else ses.copy(attribute = newAttribute)
+        case sc: StringContains =>
+          val newAttribute = InternalSchemaUtils.reBuildFilterName(sc.attribute, fileSchema, querySchema)
+          if (newAttribute.isEmpty) AlwaysTrue else sc.copy(attribute = newAttribute)
+        case AlwaysTrue =>
+          AlwaysTrue
+        case AlwaysFalse =>
+          AlwaysFalse
+        case _ =>
+          AlwaysTrue
+      }
+    }
+  }
+
+  protected var typeChangeInfos: java.util.Map[Integer, Pair[DataType, DataType]] = null
+
+  def getHadoopConfClone(footerFileMetaData: FileMetaData, enableVectorizedReader: Boolean): Configuration = {
+    // Clone new conf
+    val hadoopAttemptConf = new Configuration(sharedConf)
+    typeChangeInfos = if (shouldUseInternalSchema) {
+      val mergedInternalSchema = new InternalSchemaMerger(fileSchema, querySchemaOption.get(), true, true).mergeSchema()
+      val mergedSchema = SparkInternalSchemaConverter.constructSparkSchemaFromInternalSchema(mergedInternalSchema)
+
+      hadoopAttemptConf.set(ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA, mergedSchema.json)
+
+      SparkInternalSchemaConverter.collectTypeChangedCols(querySchemaOption.get(), mergedInternalSchema)
+    } else {
+      val (implicitTypeChangeInfo, sparkRequestSchema) = HoodieParquetFileFormatHelper.buildImplicitSchemaChangeInfo(hadoopAttemptConf, footerFileMetaData, requiredSchema)
+      if (!implicitTypeChangeInfo.isEmpty) {
+        shouldUseInternalSchema = true
+        hadoopAttemptConf.set(ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA, sparkRequestSchema.json)
+      }
+      implicitTypeChangeInfo
+    }
+
+    if (enableVectorizedReader && shouldUseInternalSchema &&
+      !typeChangeInfos.values().forall(_.getLeft.isInstanceOf[AtomicType])) {
+      throw new IllegalArgumentException(
+        "Nested types with type changes(implicit or explicit) cannot be read 
in vectorized mode. " +
+          "To workaround this issue, set 
spark.sql.parquet.enableVectorizedReader=false.")
+    }
+
+    hadoopAttemptConf
+  }
+
+  def generateUnsafeProjection(fullSchema: Seq[AttributeReference], timeZoneId: Option[String]): UnsafeProjection = {

Review Comment:
   That is a good idea
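
   For context, a minimal sketch of the kind of body generateUnsafeProjection could have, inferred only from the imports (Cast, GenerateUnsafeProjection) and the typeChangeInfos field quoted above; the actual implementation in this PR may differ, and the assumption that each pair holds (query type, file type) as (left, right) is drawn from how the snippet uses getLeft:

       def generateUnsafeProjection(fullSchema: Seq[AttributeReference], timeZoneId: Option[String]): UnsafeProjection = {
         if (typeChangeInfos == null || typeChangeInfos.isEmpty) {
           // No type changes: project the requested schema straight through.
           GenerateUnsafeProjection.generate(fullSchema, fullSchema)
         } else {
           // Re-type changed attributes to what the reader actually emits,
           // assuming the pair's right side is the file's (old) type.
           val fileTypedSchema = fullSchema.zipWithIndex.map { case (attr, i) =>
             if (typeChangeInfos.containsKey(i)) {
               attr.copy(dataType = typeChangeInfos.get(i).getRight)(attr.exprId, attr.qualifier)
             } else {
               attr
             }
           }
           // Cast each changed column back to the type the query asked for,
           // assuming the pair's left side is the query's (new) type.
           val castExprs = fileTypedSchema.zipWithIndex.map { case (attr, i) =>
             if (typeChangeInfos.containsKey(i)) {
               Cast(attr, typeChangeInfos.get(i).getLeft, timeZoneId)
             } else {
               attr
             }
           }
           GenerateUnsafeProjection.generate(castExprs, fileTypedSchema)
         }
       }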


