Hi, 
It looks like the versions used across these projects are incompatible, causing a version conflict on the class com.google.common.base.Preconditions. You could try shading (relocating) that package in both the Flink and Hudi bundles.
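
For reference, a minimal sketch of such a relocation, assuming it is added
to the maven-shade-plugin configuration in the bundle's pom.xml (the shaded
pattern name below is only illustrative):

    <relocation>
      <!-- move guava classes to a private namespace so the bundled copy
           cannot clash with the guava shipped by Hadoop/Hive -->
      <pattern>com.google.common</pattern>
      <shadedPattern>org.apache.hudi.com.google.common</shadedPattern>
    </relocation>

Before rebuilding, it may also help to confirm which jar under lib/ actually
bundles the conflicting class, e.g. with
"jar tf <jar> | grep com/google/common/base/Preconditions".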

--

    Best!
    Xuyang

At 2022-09-14 09:27:45, "Summer" <binjie...@paat.com> wrote:
>
>Versions: Flink 1.13.3, Hudi 0.10.1, Hive 3.1.2, Hadoop 3.2.1
>
>
>Build (Hudi): mvn clean package -DskipITs -Dmaven.test.skip=true 
>-Dhadoop.version=3.2.1 -Pflink-bundle-shade-hive3
>
>Build (Flink-SQL-HIVE): mvn clean install -Dfast -Dhadoop.version=3.2.1 -Dscala-2.11 
>-DskipTests -T 4 -Dmaven.compile.fork=true -Dmaven.javadoc.skip=true 
>-Dcheckstyle.skip=true
>
>
>
>Startup: ./sql-client.sh embedded -j ../lib/hudi-flink-bundle_2.11-0.10.1-rc1.jar
>
>Lib directory:
>
>
>[root@rhy-t-bd-java lib]# ll
>total 271504
>-rw-r--r-- 1 root  root      92313 Oct 12  2021 flink-csv-1.13.3.jar
>-rw-r--r-- 1 root  root  106535831 Oct 12  2021 flink-dist_2.12-1.13.3.jar
>-rw-r--r-- 1 root  root     148127 Oct 12  2021 flink-json-1.13.3.jar
>-rwxrwxrwx 1 root  root    7709740 Jun  8  2021 
>flink-shaded-zookeeper-3.4.14.jar
>-rw-r--r-- 1 stack wheel  48845196 Sep 13 18:43 
>flink-sql-connector-hive-3.1.2_2.11-1.13.2.jar
>-rw-r--r-- 1 root  root   35051553 Oct 12  2021 flink-table_2.12-1.13.3.jar
>-rw-r--r-- 1 root  root   38613339 Oct 12  2021 
>flink-table-blink_2.12-1.13.3.jar
>-rw-r--r-- 1 root  root   38955252 Sep 13 17:20 
>hudi-flink-bundle_2.11-0.10.1-rc1.jar
>-rwxrwxrwx 1 root  root      67114 Mar 31  2021 log4j-1.2-api-2.12.1.jar
>-rwxrwxrwx 1 root  root     276771 Mar 31  2021 log4j-api-2.12.1.jar
>-rwxrwxrwx 1 root  root    1674433 Mar 31  2021 log4j-core-2.12.1.jar
>-rwxrwxrwx 1 root  root      23518 Mar 31  2021 log4j-slf4j-impl-2.12.1.jar
>
>
>Flink SQL: CREATE TABLE paat_hudi_flink_test(
>
>id bigint ,
>
>name string,
>
>birthday TIMESTAMP(3),
>
>ts TIMESTAMP(3),
>
>`partition` VARCHAR(20),
>
>primary key(id) not enforced -- the uuid primary key must be specified
>
>)
>
>PARTITIONED BY (`partition`)
>
>with(
>
>'connector'='hudi',
>
>'path' = 
>'hdfs://emr-cluster/user/hive/hudi/warehouse/ods_hudi.hudi_flink_test/'
>
>, 'hoodie.datasource.write.recordkey.field' = 'id'
>
>, 'write.precombine.field' = 'ts'
>
>, 'write.tasks' = '1'
>
>, 'compaction.tasks' = '1'
>
>, 'write.rate.limit' = '2000'
>
>, 'table.type' = 'MERGE_ON_READ'
>
>, 'compaction.async.enable' = 'true'
>
>, 'compaction.trigger.strategy' = 'num_commits'
>
>, 'compaction.max_memory' = '1024'
>
>, 'changelog.enable' = 'true'
>
>, 'read.streaming.enable' = 'true'
>
>, 'read.streaming.check-interval' = '4'
>
>, 'hive_sync.enable' = 'true'
>
>, 'hive_sync.mode' = 'hms'
>
>, 'hive_sync.metastore.uris' = 'thrift://:9083'
>
>, 'hive_sync.jdbc_url' = 'jdbc:hive2://:10000'
>
>, 'hive_sync.table' = 'hudi_flink_test'
>
>, 'hive_sync.db' = 'ods_hudi'
>
>, 'hive_sync.username' = ''
>
>, 'hive_sync.password' = '*^'
>
>, 'hive_sync.support_timestamp' = 'true'
>
>);
>
>Query: select * from paat_hudi_flink_test;
>
>
>
>
>Error: 2022-09-13 18:45:29,203 INFO  
>org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor         [] - The 
>RpcEndpoint jobmanager_2 failed.
>org.apache.flink.runtime.rpc.akka.exceptions.AkkaRpcException: Could not start 
>RpcEndpoint jobmanager_2.
>        at 
> org.apache.flink.runtime.rpc.akka.AkkaRpcActor$StoppedState.start(AkkaRpcActor.java:610)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleControlMessage(AkkaRpcActor.java:180)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:26) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:21) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at scala.PartialFunction.applyOrElse(PartialFunction.scala:123) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at scala.PartialFunction.applyOrElse$(PartialFunction.scala:122) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:21) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:171) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:172) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at akka.actor.Actor.aroundReceive(Actor.scala:517) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at akka.actor.Actor.aroundReceive$(Actor.scala:515) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:225) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at akka.actor.ActorCell.receiveMessage(ActorCell.scala:592) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at akka.actor.ActorCell.invoke(ActorCell.scala:561) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:258) 
> [flink-dist_2.12-1.13.3.jar:1.13.3]
>        at akka.dispatch.Mailbox.run(Mailbox.scala:225) 
> [flink-dist_2.12-1.13.3.jar:1.13.3]
>        at akka.dispatch.Mailbox.exec(Mailbox.scala:235) 
> [flink-dist_2.12-1.13.3.jar:1.13.3]
>        at akka.dispatch.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260) 
> [flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> akka.dispatch.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339) 
> [flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> akka.dispatch.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979) 
> [flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> akka.dispatch.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
>  [flink-dist_2.12-1.13.3.jar:1.13.3]
>Caused by: org.apache.flink.runtime.jobmaster.JobMasterException: Could not 
>start the JobMaster.
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.onStart(JobMaster.java:385) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.RpcEndpoint.internalCallOnStart(RpcEndpoint.java:181)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.akka.AkkaRpcActor$StoppedState.start(AkkaRpcActor.java:605)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        ... 20 more
>Caused by: org.apache.flink.util.FlinkRuntimeException: Failed to start the 
>operator coordinators
>        at 
> org.apache.flink.runtime.scheduler.DefaultOperatorCoordinatorHandler.startAllOperatorCoordinators(DefaultOperatorCoordinatorHandler.java:90)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.scheduler.SchedulerBase.startScheduling(SchedulerBase.java:592)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.startScheduling(JobMaster.java:955)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.startJobExecution(JobMaster.java:873)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.onStart(JobMaster.java:383) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.RpcEndpoint.internalCallOnStart(RpcEndpoint.java:181)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.akka.AkkaRpcActor$StoppedState.start(AkkaRpcActor.java:605)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        ... 20 more
>Caused by: org.apache.hudi.exception.HoodieIOException: Failed to get instance 
>of org.apache.hadoop.fs.FileSystem
>        at org.apache.hudi.common.fs.FSUtils.getFs(FSUtils.java:104) 
> ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.hudi.util.StreamerUtil.tableExists(StreamerUtil.java:288) 
> ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.hudi.util.StreamerUtil.initTableIfNotExists(StreamerUtil.java:258) 
> ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.hudi.sink.StreamWriteOperatorCoordinator.start(StreamWriteOperatorCoordinator.java:164)
>  ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.flink.runtime.operators.coordination.OperatorCoordinatorHolder.start(OperatorCoordinatorHolder.java:194)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.scheduler.DefaultOperatorCoordinatorHandler.startAllOperatorCoordinators(DefaultOperatorCoordinatorHandler.java:85)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.scheduler.SchedulerBase.startScheduling(SchedulerBase.java:592)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.startScheduling(JobMaster.java:955)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.startJobExecution(JobMaster.java:873)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.onStart(JobMaster.java:383) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.RpcEndpoint.internalCallOnStart(RpcEndpoint.java:181)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.akka.AkkaRpcActor$StoppedState.start(AkkaRpcActor.java:605)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        ... 20 more
>Caused by: java.io.IOException: Couldn't create proxy provider class 
>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
>        at 
> org.apache.hadoop.hdfs.NameNodeProxiesClient.createFailoverProxyProvider(NameNodeProxiesClient.java:261)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.hdfs.NameNodeProxiesClient.createFailoverProxyProvider(NameNodeProxiesClient.java:224)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.hdfs.NameNodeProxiesClient.createProxyWithClientProtocol(NameNodeProxiesClient.java:134)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:356) 
> ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:290) 
> ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:171)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3303) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:124) 
> ~[hadoop-common-3.2.1.jar:?]
>        at 
> org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3352) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:3320) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:479) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.fs.Path.getFileSystem(Path.java:365) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hudi.common.fs.FSUtils.getFs(FSUtils.java:102) 
> ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.hudi.util.StreamerUtil.tableExists(StreamerUtil.java:288) 
> ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.hudi.util.StreamerUtil.initTableIfNotExists(StreamerUtil.java:258) 
> ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.hudi.sink.StreamWriteOperatorCoordinator.start(StreamWriteOperatorCoordinator.java:164)
>  ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.flink.runtime.operators.coordination.OperatorCoordinatorHolder.start(OperatorCoordinatorHolder.java:194)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.scheduler.DefaultOperatorCoordinatorHandler.startAllOperatorCoordinators(DefaultOperatorCoordinatorHandler.java:85)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.scheduler.SchedulerBase.startScheduling(SchedulerBase.java:592)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.startScheduling(JobMaster.java:955)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.startJobExecution(JobMaster.java:873)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.onStart(JobMaster.java:383) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.RpcEndpoint.internalCallOnStart(RpcEndpoint.java:181)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.akka.AkkaRpcActor$StoppedState.start(AkkaRpcActor.java:605)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        ... 20 more
>Caused by: java.lang.reflect.InvocationTargetException
>        at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native 
> Method) ~[?:1.8.0_231]
>        at 
> sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
>  ~[?:1.8.0_231]
>        at 
> sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
>  ~[?:1.8.0_231]
>        at java.lang.reflect.Constructor.newInstance(Constructor.java:423) 
> ~[?:1.8.0_231]
>        at 
> org.apache.hadoop.hdfs.NameNodeProxiesClient.createFailoverProxyProvider(NameNodeProxiesClient.java:245)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.hdfs.NameNodeProxiesClient.createFailoverProxyProvider(NameNodeProxiesClient.java:224)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.hdfs.NameNodeProxiesClient.createProxyWithClientProtocol(NameNodeProxiesClient.java:134)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:356) 
> ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:290) 
> ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:171)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3303) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:124) 
> ~[hadoop-common-3.2.1.jar:?]
>        at 
> org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3352) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:3320) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:479) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.fs.Path.getFileSystem(Path.java:365) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hudi.common.fs.FSUtils.getFs(FSUtils.java:102) 
> ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.hudi.util.StreamerUtil.tableExists(StreamerUtil.java:288) 
> ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.hudi.util.StreamerUtil.initTableIfNotExists(StreamerUtil.java:258) 
> ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.hudi.sink.StreamWriteOperatorCoordinator.start(StreamWriteOperatorCoordinator.java:164)
>  ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.flink.runtime.operators.coordination.OperatorCoordinatorHolder.start(OperatorCoordinatorHolder.java:194)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.scheduler.DefaultOperatorCoordinatorHandler.startAllOperatorCoordinators(DefaultOperatorCoordinatorHandler.java:85)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.scheduler.SchedulerBase.startScheduling(SchedulerBase.java:592)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.startScheduling(JobMaster.java:955)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.startJobExecution(JobMaster.java:873)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.onStart(JobMaster.java:383) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.RpcEndpoint.internalCallOnStart(RpcEndpoint.java:181)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.akka.AkkaRpcActor$StoppedState.start(AkkaRpcActor.java:605)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        ... 20 more
>Caused by: java.lang.NoSuchMethodError: 
>com.google.common.base.Preconditions.checkArgument(ZLjava/lang/String;Ljava/lang/Object;)V
>        at org.apache.hadoop.conf.Configuration.set(Configuration.java:1357) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.conf.Configuration.set(Configuration.java:1338) 
> ~[hadoop-common-3.2.1.jar:?]
>        at 
> org.apache.hadoop.conf.Configuration.setInt(Configuration.java:1515) 
> ~[hadoop-common-3.2.1.jar:?]
>        at 
> org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider.<init>(AbstractNNFailoverProxyProvider.java:70)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider.<init>(ConfiguredFailoverProxyProvider.java:50)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider.<init>(ConfiguredFailoverProxyProvider.java:45)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native 
> Method) ~[?:1.8.0_231]
>        at 
> sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
>  ~[?:1.8.0_231]
>        at 
> sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
>  ~[?:1.8.0_231]
>        at java.lang.reflect.Constructor.newInstance(Constructor.java:423) 
> ~[?:1.8.0_231]
>        at 
> org.apache.hadoop.hdfs.NameNodeProxiesClient.createFailoverProxyProvider(NameNodeProxiesClient.java:245)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.hdfs.NameNodeProxiesClient.createFailoverProxyProvider(NameNodeProxiesClient.java:224)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.hdfs.NameNodeProxiesClient.createProxyWithClientProtocol(NameNodeProxiesClient.java:134)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:356) 
> ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:290) 
> ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:171)
>  ~[hadoop-hdfs-client-3.2.1.jar:?]
>        at 
> org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3303) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:124) 
> ~[hadoop-common-3.2.1.jar:?]
>        at 
> org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3352) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:3320) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:479) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hadoop.fs.Path.getFileSystem(Path.java:365) 
> ~[hadoop-common-3.2.1.jar:?]
>        at org.apache.hudi.common.fs.FSUtils.getFs(FSUtils.java:102) 
> ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.hudi.util.StreamerUtil.tableExists(StreamerUtil.java:288) 
> ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.hudi.util.StreamerUtil.initTableIfNotExists(StreamerUtil.java:258) 
> ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.hudi.sink.StreamWriteOperatorCoordinator.start(StreamWriteOperatorCoordinator.java:164)
>  ~[hudi-flink-bundle_2.11-0.10.1-rc1.jar:0.10.1-rc1]
>        at 
> org.apache.flink.runtime.operators.coordination.OperatorCoordinatorHolder.start(OperatorCoordinatorHolder.java:194)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.scheduler.DefaultOperatorCoordinatorHandler.startAllOperatorCoordinators(DefaultOperatorCoordinatorHandler.java:85)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.scheduler.SchedulerBase.startScheduling(SchedulerBase.java:592)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.startScheduling(JobMaster.java:955)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.startJobExecution(JobMaster.java:873)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.jobmaster.JobMaster.onStart(JobMaster.java:383) 
> ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.RpcEndpoint.internalCallOnStart(RpcEndpoint.java:181)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        at 
> org.apache.flink.runtime.rpc.akka.AkkaRpcActor$StoppedState.start(AkkaRpcActor.java:605)
>  ~[flink-dist_2.12-1.13.3.jar:1.13.3]
>        ... 20 more
>
>
>Is this caused by a guava version mismatch? If so, is there a known fix? Or could it be something else?