[ https://issues.apache.org/jira/browse/HUDI-7805?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

ASF GitHub Bot updated HUDI-7805:
---------------------------------
    Labels: pull-request-available  (was: )

> FileSystemBasedLockProvider should automatically delete the lock file when a 
> lock conflict occurs, so that the next write does not fail
> ----------------------------------------------------------------------------------------------------------
>
>                 Key: HUDI-7805
>                 URL: https://issues.apache.org/jira/browse/HUDI-7805
>             Project: Apache Hudi
>          Issue Type: Improvement
>          Components: multi-writer
>            Reporter: xy
>            Assignee: xy
>            Priority: Major
>              Labels: pull-request-available
>
> org.apache.hudi.exception.HoodieLockException: Unable to acquire lock, lock object hdfs://aa-region/region04/2211/warehouse/hudi/odsmon_log/.hoodie/lock
>       at org.apache.hudi.client.transaction.lock.LockManager.lock(LockManager.java:100)
>       at org.apache.hudi.client.transaction.TransactionManager.beginTransaction(TransactionManager.java:58)
>       at org.apache.hudi.client.BaseHoodieWriteClient.doInitTable(BaseHoodieWriteClient.java:1258)
>       at org.apache.hudi.client.BaseHoodieWriteClient.initTable(BaseHoodieWriteClient.java:1301)
>       at org.apache.hudi.client.SparkRDDWriteClient.upsert(SparkRDDWriteClient.java:139)
>       at org.apache.hudi.DataSourceUtils.doWriteOperation(DataSourceUtils.java:216)
>       at org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:396)
>       at org.apache.spark.sql.hudi.command.InsertIntoHoodieTableCommand$.run(InsertIntoHoodieTableCommand.scala:108)
>       at org.apache.spark.sql.hudi.command.InsertIntoHoodieTableCommand.run(InsertIntoHoodieTableCommand.scala:61)
>       at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:80)
>       at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:78)
>       at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:89)
>       at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:110)
>       at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
>       at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
>       at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
>       at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:775)
>       at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
>       at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:110)
>       at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:106)
>       at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:481)
>       at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:82)
>       at org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:481)
>       at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:30)
>       at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
>       at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
>       at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:30)
>       at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:30)
>       at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:457)
>       at org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:106)
>       at org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:93)
>       at org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:91)
>       at org.apache.spark.sql.Dataset.<init>(Dataset.scala:219)
>       at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:99)
>       at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:775)
>       at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:96)
>       at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:618)
>       at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:775)
>       at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:613)
>       at com.vivo.bigdata.etl.process.EtlProcessMain$.main(EtlProcessMain.scala:367)
>       at com.vivo.bigdata.etl.process.EtlProcessMain.main(EtlProcessMain.scala)
>       at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>       at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>       at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>       at java.lang.reflect.Method.invoke(Method.java:498)
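The gist of the proposal: when a writer crashes between acquiring and releasing the lock, the lock file under .hoodie/lock is left behind, and every subsequent writer fails with the HoodieLockException above. A filesystem-based lock provider can instead treat a sufficiently old lock file as stale and reclaim it. Below is a minimal, hypothetical Java sketch of that idea; it is not the actual Hudi FileSystemBasedLockProvider, and the class name, expiry heuristic, and retry behavior are all assumptions for illustration.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.time.Instant;

/**
 * Hypothetical sketch: a filesystem lock that reclaims a stale lock file
 * instead of failing every later acquisition. Illustrative only; not the
 * actual Hudi FileSystemBasedLockProvider implementation.
 */
public class StaleAwareFileLock {

    private final Path lockFile;
    private final long expiryMs; // assumed staleness threshold

    public StaleAwareFileLock(Path lockFile, long expiryMs) {
        this.lockFile = lockFile;
        this.expiryMs = expiryMs;
    }

    /** Try to acquire the lock; reclaim the lock file if it looks stale. */
    public boolean tryLock() throws IOException {
        if (createLockFile()) {
            return true;
        }
        // The lock file exists. If its writer crashed before unlocking,
        // nothing ever deletes it, and every later write fails. Treat a
        // file older than expiryMs as abandoned and reclaim it.
        long ageMs = System.currentTimeMillis()
                - Files.getLastModifiedTime(lockFile).toMillis();
        if (ageMs > expiryMs) {
            Files.deleteIfExists(lockFile); // clean up the stale lock
            return createLockFile();        // retry once after cleanup
        }
        return false; // plausibly held by a live writer
    }

    /** Release the lock by deleting the lock file. */
    public void unlock() throws IOException {
        Files.deleteIfExists(lockFile);
    }

    private boolean createLockFile() throws IOException {
        try {
            // CREATE_NEW is atomic: exactly one concurrent caller can
            // create the file, so creation doubles as lock acquisition.
            Files.write(lockFile,
                    Instant.now().toString().getBytes(StandardCharsets.UTF_8),
                    StandardOpenOption.CREATE_NEW);
            return true;
        } catch (FileAlreadyExistsException e) {
            return false;
        }
    }
}

The key property is that create-if-absent is atomic, so even if two writers both decide the old lock is stale and delete it, only one of them can recreate the file. On HDFS the same idea would presumably map to an atomic create of the lock path (failing if it already exists) rather than java.nio, but the reclaim-on-expiry logic is the same.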



--
This message was sent by Atlassian Jira
(v8.20.10#820010)
