This is an automated email from the ASF dual-hosted git repository.

vinoth pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/master by this push:
     new 23e93d0  [MINOR] fix spark 3 build for incremental query on MOR (#2425)
23e93d0 is described below

commit 23e93d05c02775b0c518a2da160810b23bb4bca4
Author: Gary Li <yanjia.gary...@gmail.com>
AuthorDate: Sun Jan 10 13:08:55 2021 +0800

    [MINOR] fix spark 3 build for incremental query on MOR (#2425)
---
 .../scala/org/apache/hudi/MergeOnReadIncrementalRelation.scala | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/hudi/MergeOnReadIncrementalRelation.scala b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/hudi/MergeOnReadIncrementalRelation.scala
index d7b8cff..c85b972 100644
--- a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/hudi/MergeOnReadIncrementalRelation.scala
+++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/hudi/MergeOnReadIncrementalRelation.scala
@@ -17,8 +17,6 @@
 
 package org.apache.hudi
 
-import org.apache.hadoop.fs.{FileStatus, FileSystem, GlobPattern, Path}
-import org.apache.hadoop.mapred.JobConf
 import org.apache.hudi.common.fs.FSUtils
 import org.apache.hudi.common.model.HoodieRecord
 import org.apache.hudi.common.table.view.HoodieTableFileSystemView
@@ -26,8 +24,10 @@ import org.apache.hudi.common.table.{HoodieTableMetaClient, TableSchemaResolver}
 import org.apache.hudi.exception.HoodieException
 import org.apache.hudi.hadoop.utils.HoodieInputFormatUtils.listAffectedFilesForCommits
 import org.apache.hudi.hadoop.utils.HoodieRealtimeRecordReaderUtils.getMaxCompactionMemoryInBytes
+
+import org.apache.hadoop.fs.{FileStatus, GlobPattern, Path}
+import org.apache.hadoop.mapred.JobConf
 import org.apache.log4j.LogManager
-import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.execution.datasources.PartitionedFile
@@ -138,9 +138,6 @@ class MergeOnReadIncrementalRelation(val sqlContext: SQLContext,
       hadoopConf = sqlContext.sparkSession.sessionState.newHadoopConf()
     )
 
-    // Follow the implementation of Spark internal HadoopRDD to handle the broadcast configuration.
-    FileSystem.getLocal(jobConf)
-    SparkHadoopUtil.get.addCredentials(jobConf)
     val rdd = new HoodieMergeOnReadRDD(
       sqlContext.sparkContext,
       jobConf,
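
What the change does: before building the HoodieMergeOnReadRDD, the incremental MOR relation used to call
FileSystem.getLocal(jobConf) and SparkHadoopUtil.get.addCredentials(jobConf), mirroring Spark's internal
HadoopRDD. Because org.apache.spark.deploy.SparkHadoopUtil is a Spark-internal class that is not usable from
external projects on Spark 3, keeping that call broke the Spark 3 build, so both lines and the now-unused
imports are dropped.

If equivalent credential propagation were ever needed again, a minimal sketch using only public Hadoop APIs
could look like the following. This is not part of the commit; the object and method names are hypothetical.

    import org.apache.hadoop.mapred.JobConf
    import org.apache.hadoop.security.UserGroupInformation

    object HoodieCredentialUtil {
      // Hypothetical stand-in for the removed SparkHadoopUtil.get.addCredentials(jobConf):
      // merges the current user's Hadoop credentials (e.g. delegation tokens) into the
      // JobConf so tasks reading base and log files can authenticate on secured clusters.
      def addCurrentUserCredentials(jobConf: JobConf): Unit = {
        jobConf.getCredentials.mergeAll(UserGroupInformation.getCurrentUser.getCredentials)
      }
    }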