vinothsiva1989 opened a new issue #2031:
URL: https://github.com/apache/hudi/issues/2031


   I am new to Hudi; please help.
   
   Writing a DataFrame to a Hudi copy-on-write table with hudi-spark-bundle_2.12:0.6.0 on Spark 3.0.0 fails with java.lang.NoSuchMethodError on org.apache.spark.sql.catalyst.encoders.ExpressionEncoder.fromRow.
   
   **To Reproduce**
   
   Steps to reproduce the behavior: 
   Step 1: start the spark-shell
   spark-shell \
     --packages org.apache.hudi:hudi-spark-bundle_2.12:0.6.0,org.apache.spark:spark-avro_2.12:3.0.0 \
     --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
     --conf 'spark.sql.hive.convertMetastoreParquet=false'
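   
   (For reference, the Hudi 0.6.0 quickstart pairs this bundle with Spark 2.4, so a launch matching that combination would look like the sketch below; the exact spark-avro version is my assumption.)
   
   spark-shell \
     --packages org.apache.hudi:hudi-spark-bundle_2.12:0.6.0,org.apache.spark:spark-avro_2.12:2.4.4 \
     --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
     --conf 'spark.sql.hive.convertMetastoreParquet=false'
   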
   Step 2: imports
   import org.apache.spark.sql.SaveMode
   import org.apache.spark.sql.functions._ 
   import org.apache.hudi.DataSourceWriteOptions 
   import org.apache.hudi.config.HoodieWriteConfig 
   import org.apache.hudi.hive.MultiPartKeysValueExtractor
   import org.apache.spark.sql._
   
   Step 3: create a Hive table
   create external table hudi_parquet (op string,
   pk_id int,
   name string,
   value int,
   updated_at timestamp,
   created_at timestamp)
   stored as parquet
   location '/user/vinoth.siva/hudi_parquet';
   
   Step 4: insert data into the Hive table
   insert into hudi_parquet values ('I',5,'htc',50,'2020-02-06 18:00:39','2020-02-06 18:00:39')
   
   Step 5: load the data into a DataFrame
   val df=spark.read.parquet("/user/vinoth.siva/hudi_parquet/000000_0");
   df.show()
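   
   A quick sanity check that the columns referenced by the Hudi options below (pk_id, updated_at, created_at) are actually present in the DataFrame:
   
   df.printSchema()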
   
   Step 6: Hudi options
   val hudiOptions = Map[String, String](
     // name of the Hudi table
     HoodieWriteConfig.TABLE_NAME -> "my_hudi_table",
     DataSourceWriteOptions.TABLE_TYPE_OPT_KEY -> "COPY_ON_WRITE",
     // record key that uniquely identifies a record within a partition
     DataSourceWriteOptions.RECORDKEY_FIELD_OPT_KEY -> "pk_id",
     // column used to build the partition path
     DataSourceWriteOptions.PARTITIONPATH_FIELD_OPT_KEY -> "created_at",
     // column used to pick the latest record when two records share a key
     DataSourceWriteOptions.PRECOMBINE_FIELD_OPT_KEY -> "updated_at",
     // sync the written table into the Hive metastore
     DataSourceWriteOptions.HIVE_SYNC_ENABLED_OPT_KEY -> "true",
     DataSourceWriteOptions.HIVE_TABLE_OPT_KEY -> "my_hudi_table",
     DataSourceWriteOptions.HIVE_PARTITION_FIELDS_OPT_KEY -> "created_at",
     DataSourceWriteOptions.HIVE_PARTITION_EXTRACTOR_CLASS_OPT_KEY -> classOf[MultiPartKeysValueExtractor].getName
   )
   
   Step 7: write the Hudi data
   df.write.format("org.apache.hudi")
     .option(DataSourceWriteOptions.OPERATION_OPT_KEY, DataSourceWriteOptions.INSERT_OPERATION_OPT_VAL)
     .options(hudiOptions)
     .mode(SaveMode.Overwrite)
     .save("/user/vinoth.siva/hudi_cow")
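   
   Once the write succeeds, I would expect to read the table back with something like this (the glob depth is a guess on my part, based on the single partition column):
   
   val hudiDf = spark.read.format("org.apache.hudi").load("/user/vinoth.siva/hudi_cow/*/*")
   hudiDf.show()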
    
   
   
   **Expected behavior**
   
   The DataFrame is written as a Hudi copy-on-write table under /user/vinoth.siva/hudi_cow and synced to the Hive table my_hudi_table, without errors.
   
   **Environment Description**
   
   * Hudi version : 0.6.0
   
   * Spark version : 3.0.0
   
   * Hive version : 1.2
   
   * Hadoop version : 2.7
   
   * Storage (HDFS/S3/GCS..) : HDFS
   
   * Running on Docker? (yes/no) : no
   
   
   **Additional context**
   
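   From what I can tell from the stacktrace, Spark 3.0 removed the ExpressionEncoder.fromRow/toRow methods and replaced them with createDeserializer()/createSerializer(), so a Hudi bundle compiled against Spark 2.4 would fail exactly like this at runtime. A minimal sketch of the API difference, assuming Spark 3.0.0 on the classpath (the names here are just for illustration):
   
   import org.apache.spark.sql.catalyst.InternalRow
   import org.apache.spark.sql.catalyst.encoders.RowEncoder
   import org.apache.spark.sql.types.{IntegerType, StructField, StructType}
   
   val schema = StructType(Seq(StructField("pk_id", IntegerType)))
   val encoder = RowEncoder(schema).resolveAndBind()
   val internalRow = InternalRow(5)
   
   // Spark 2.4.x API, which the stacktrace suggests the bundle was compiled against:
   // val row = encoder.fromRow(internalRow)   // no longer exists in Spark 3.0
   
   // Spark 3.0.x replacement:
   val row = encoder.createDeserializer().apply(internalRow)
   
   Does this mean hudi-spark-bundle 0.6.0 only supports Spark 2.4.x?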
   
   **Stacktrace**
   
   20/08/25 10:01:21 WARN hudi.HoodieSparkSqlWriter$: hoodie table at /user/vinoth.siva/hudi_cow already exists. Deleting existing data & overwriting with new data.
   20/08/25 10:01:22 ERROR executor.Executor: Exception in task 0.0 in stage 3.0 (TID 3)
   java.lang.NoSuchMethodError: 'java.lang.Object org.apache.spark.sql.catalyst.encoders.ExpressionEncoder.fromRow(org.apache.spark.sql.catalyst.InternalRow)'
           at org.apache.hudi.AvroConversionUtils$.$anonfun$createRdd$1(AvroConversionUtils.scala:44)
           at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
           at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
           at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
           at scala.collection.Iterator$SliceIterator.next(Iterator.scala:271)
           at scala.collection.Iterator.foreach(Iterator.scala:941)
           at scala.collection.Iterator.foreach$(Iterator.scala:941)
           at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
           at scala.collection.generic.Growable.$plus$plus$eq(Growable.scala:62)
           at scala.collection.generic.Growable.$plus$plus$eq$(Growable.scala:53)
           at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:105)
           at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:49)
           at scala.collection.TraversableOnce.to(TraversableOnce.scala:315)
           at scala.collection.TraversableOnce.to$(TraversableOnce.scala:313)
           at scala.collection.AbstractIterator.to(Iterator.scala:1429)
           at scala.collection.TraversableOnce.toBuffer(TraversableOnce.scala:307)
           at scala.collection.TraversableOnce.toBuffer$(TraversableOnce.scala:307)
           at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1429)
           at scala.collection.TraversableOnce.toArray(TraversableOnce.scala:294)
           at scala.collection.TraversableOnce.toArray$(TraversableOnce.scala:288)
           at scala.collection.AbstractIterator.toArray(Iterator.scala:1429)
           at org.apache.spark.rdd.RDD.$anonfun$take$2(RDD.scala:1423)
           at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2133)
           at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
           at org.apache.spark.scheduler.Task.run(Task.scala:127)
           at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
           at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
           at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
           at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130)
           at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630)
           at java.base/java.lang.Thread.run(Thread.java:832)
   20/08/25 10:01:22 WARN scheduler.TaskSetManager: Lost task 0.0 in stage 3.0 (TID 3, im-hdp-mgr1.infocepts.com, executor driver): java.lang.NoSuchMethodError: 'java.lang.Object org.apache.spark.sql.catalyst.encoders.ExpressionEncoder.fromRow(org.apache.spark.sql.catalyst.InternalRow)'
           at org.apache.hudi.AvroConversionUtils$.$anonfun$createRdd$1(AvroConversionUtils.scala:44)
           at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
           at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
           at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
           at scala.collection.Iterator$SliceIterator.next(Iterator.scala:271)
           at scala.collection.Iterator.foreach(Iterator.scala:941)
           at scala.collection.Iterator.foreach$(Iterator.scala:941)
           at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
           at scala.collection.generic.Growable.$plus$plus$eq(Growable.scala:62)
           at scala.collection.generic.Growable.$plus$plus$eq$(Growable.scala:53)
           at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:105)
           at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:49)
           at scala.collection.TraversableOnce.to(TraversableOnce.scala:315)
           at scala.collection.TraversableOnce.to$(TraversableOnce.scala:313)
           at scala.collection.AbstractIterator.to(Iterator.scala:1429)
           at scala.collection.TraversableOnce.toBuffer(TraversableOnce.scala:307)
           at scala.collection.TraversableOnce.toBuffer$(TraversableOnce.scala:307)
           at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1429)
           at scala.collection.TraversableOnce.toArray(TraversableOnce.scala:294)
           at scala.collection.TraversableOnce.toArray$(TraversableOnce.scala:288)
           at scala.collection.AbstractIterator.toArray(Iterator.scala:1429)
           at org.apache.spark.rdd.RDD.$anonfun$take$2(RDD.scala:1423)
           at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2133)
           at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
           at org.apache.spark.scheduler.Task.run(Task.scala:127)
           at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
           at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
           at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
           at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130)
           at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630)
           at java.base/java.lang.Thread.run(Thread.java:832)
   
   20/08/25 10:01:22 ERROR scheduler.TaskSetManager: Task 0 in stage 3.0 failed 1 times; aborting job
   org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 3.0 failed 1 times, most recent failure: Lost task 0.0 in stage 3.0 (TID 3, im-hdp-mgr1.infocepts.com, executor driver): java.lang.NoSuchMethodError: 'java.lang.Object org.apache.spark.sql.catalyst.encoders.ExpressionEncoder.fromRow(org.apache.spark.sql.catalyst.InternalRow)'
           at org.apache.hudi.AvroConversionUtils$.$anonfun$createRdd$1(AvroConversionUtils.scala:44)
           at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
           at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
           at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
           at scala.collection.Iterator$SliceIterator.next(Iterator.scala:271)
           at scala.collection.Iterator.foreach(Iterator.scala:941)
           at scala.collection.Iterator.foreach$(Iterator.scala:941)
           at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
           at scala.collection.generic.Growable.$plus$plus$eq(Growable.scala:62)
           at scala.collection.generic.Growable.$plus$plus$eq$(Growable.scala:53)
           at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:105)
           at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:49)
           at scala.collection.TraversableOnce.to(TraversableOnce.scala:315)
           at scala.collection.TraversableOnce.to$(TraversableOnce.scala:313)
           at scala.collection.AbstractIterator.to(Iterator.scala:1429)
           at scala.collection.TraversableOnce.toBuffer(TraversableOnce.scala:307)
           at scala.collection.TraversableOnce.toBuffer$(TraversableOnce.scala:307)
           at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1429)
           at scala.collection.TraversableOnce.toArray(TraversableOnce.scala:294)
           at scala.collection.TraversableOnce.toArray$(TraversableOnce.scala:288)
           at scala.collection.AbstractIterator.toArray(Iterator.scala:1429)
           at org.apache.spark.rdd.RDD.$anonfun$take$2(RDD.scala:1423)
           at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2133)
           at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
           at org.apache.spark.scheduler.Task.run(Task.scala:127)
           at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
           at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
           at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
           at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130)
           at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630)
           at java.base/java.lang.Thread.run(Thread.java:832)
   
   Driver stacktrace:
     at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2023)
     at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:1972)
     at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:1971)
     at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
     at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
     at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
     at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1971)
     at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:950)
     at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:950)
     at scala.Option.foreach(Option.scala:407)
     at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:950)
     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2203)
     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2152)
     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2141)
     at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
     at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:752)
     at org.apache.spark.SparkContext.runJob(SparkContext.scala:2093)
     at org.apache.spark.SparkContext.runJob(SparkContext.scala:2114)
     at org.apache.spark.SparkContext.runJob(SparkContext.scala:2133)
     at org.apache.spark.rdd.RDD.$anonfun$take$1(RDD.scala:1423)
     at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
     at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
     at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
     at org.apache.spark.rdd.RDD.take(RDD.scala:1396)
     at org.apache.spark.rdd.RDD.$anonfun$isEmpty$1(RDD.scala:1531)
     at scala.runtime.java8.JFunction0$mcZ$sp.apply(JFunction0$mcZ$sp.java:23)
     at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
     at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
     at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
     at org.apache.spark.rdd.RDD.isEmpty(RDD.scala:1531)
     at org.apache.spark.api.java.JavaRDDLike.isEmpty(JavaRDDLike.scala:544)
     at org.apache.spark.api.java.JavaRDDLike.isEmpty$(JavaRDDLike.scala:544)
     at org.apache.spark.api.java.AbstractJavaRDDLike.isEmpty(JavaRDDLike.scala:45)
     at org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:164)
     at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:125)
     at org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
     at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
     at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
     at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:90)
     at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:175)
     at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:213)
     at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
     at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:210)
     at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:171)
     at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:122)
     at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:121)
     at org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:944)
     at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:100)
     at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
     at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:87)
     at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
     at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
     at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:944)
     at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:396)
     at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:380)
     at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:269)
     ... 51 elided
   Caused by: java.lang.NoSuchMethodError: 'java.lang.Object org.apache.spark.sql.catalyst.encoders.ExpressionEncoder.fromRow(org.apache.spark.sql.catalyst.InternalRow)'
     at org.apache.hudi.AvroConversionUtils$.$anonfun$createRdd$1(AvroConversionUtils.scala:44)
     at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
     at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
     at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
     at scala.collection.Iterator$SliceIterator.next(Iterator.scala:271)
     at scala.collection.Iterator.foreach(Iterator.scala:941)
     at scala.collection.Iterator.foreach$(Iterator.scala:941)
     at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
     at scala.collection.generic.Growable.$plus$plus$eq(Growable.scala:62)
     at scala.collection.generic.Growable.$plus$plus$eq$(Growable.scala:53)
     at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:105)
     at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:49)
     at scala.collection.TraversableOnce.to(TraversableOnce.scala:315)
     at scala.collection.TraversableOnce.to$(TraversableOnce.scala:313)
     at scala.collection.AbstractIterator.to(Iterator.scala:1429)
     at scala.collection.TraversableOnce.toBuffer(TraversableOnce.scala:307)
     at scala.collection.TraversableOnce.toBuffer$(TraversableOnce.scala:307)
     at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1429)
     at scala.collection.TraversableOnce.toArray(TraversableOnce.scala:294)
     at scala.collection.TraversableOnce.toArray$(TraversableOnce.scala:288)
     at scala.collection.AbstractIterator.toArray(Iterator.scala:1429)
     at org.apache.spark.rdd.RDD.$anonfun$take$2(RDD.scala:1423)
     at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2133)
     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
     at org.apache.spark.scheduler.Task.run(Task.scala:127)
     at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
     at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
     at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130)
     at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630)
     at java.base/java.lang.Thread.run(Thread.java:832)
   
   
   

