lintingbin commented on PR #3539:
URL: https://github.com/apache/amoro/pull/3539#issuecomment-3068330484
This change causes errors in our internal environment. Does a Hadoop 2
environment need to support the Hive 4 Metastore?
> 2025-07-14 07:47:40,139 ERROR [async-table-runtime-refresh-executor-0]
[org.apache.amoro.server.scheduler.inline.TableRuntimeRefreshExecutor] -
Refreshing table hive-test.qwdb.iceberg05(tableId=262837) failed.
java.lang.RuntimeException: Failed to get table info from metastore
qwdb.iceberg05
at
org.apache.iceberg.hive.HiveTableOperations.doRefresh(HiveTableOperations.java:160)
~[iceberg-hive-metastore-1.6.1.jar:?]
at
org.apache.iceberg.BaseMetastoreTableOperations.refresh(BaseMetastoreTableOperations.java:90)
~[iceberg-core-1.6.1.jar:?]
at
org.apache.iceberg.BaseMetastoreTableOperations.current(BaseMetastoreTableOperations.java:73)
~[iceberg-core-1.6.1.jar:?]
at
org.apache.iceberg.BaseMetastoreCatalog.loadTable(BaseMetastoreCatalog.java:49)
~[iceberg-core-1.6.1.jar:?]
at
com.github.benmanes.caffeine.cache.BoundedLocalCache.lambda$doComputeIfAbsent$14(BoundedLocalCache.java:2406)
~[caffeine-2.9.3.jar:?]
at
java.util.concurrent.ConcurrentHashMap.compute(ConcurrentHashMap.java:1908)
~[?:?]
at
com.github.benmanes.caffeine.cache.BoundedLocalCache.doComputeIfAbsent(BoundedLocalCache.java:2404)
~[caffeine-2.9.3.jar:?]
at
com.github.benmanes.caffeine.cache.BoundedLocalCache.computeIfAbsent(BoundedLocalCache.java:2387)
~[caffeine-2.9.3.jar:?]
at
com.github.benmanes.caffeine.cache.LocalCache.computeIfAbsent(LocalCache.java:108)
~[caffeine-2.9.3.jar:?]
at
com.github.benmanes.caffeine.cache.LocalManualCache.get(LocalManualCache.java:62)
~[caffeine-2.9.3.jar:?]
at org.apache.iceberg.CachingCatalog.loadTable(CachingCatalog.java:167)
~[iceberg-core-1.6.1.jar:?]
at
org.apache.amoro.formats.iceberg.IcebergCatalog.lambda$loadTable$6(IcebergCatalog.java:104)
~[amoro-format-iceberg-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at org.apache.amoro.table.TableMetaStore.call(TableMetaStore.java:268)
~[amoro-common-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.table.TableMetaStore.lambda$doAs$0(TableMetaStore.java:243)
~[amoro-common-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at java.security.AccessController.doPrivileged(Native Method) ~[?:?]
at javax.security.auth.Subject.doAs(Subject.java:361) ~[?:?]
at
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1918)
~[hadoop-common-2.10.2.jar:?]
at org.apache.amoro.table.TableMetaStore.doAs(TableMetaStore.java:243)
~[amoro-common-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.formats.iceberg.IcebergCatalog.loadTable(IcebergCatalog.java:101)
~[amoro-format-iceberg-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.CommonUnifiedCatalog.lambda$loadTable$1(CommonUnifiedCatalog.java:127)
~[amoro-common-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
java.util.stream.ReferencePipeline$3$1.accept(ReferencePipeline.java:195) ~[?:?]
at
java.util.stream.ReferencePipeline$3$1.accept(ReferencePipeline.java:195) ~[?:?]
at
java.util.stream.ReferencePipeline$2$1.accept(ReferencePipeline.java:177) ~[?:?]
at
java.util.Spliterators$ArraySpliterator.tryAdvance(Spliterators.java:958) ~[?:?]
at
java.util.stream.ReferencePipeline.forEachWithCancel(ReferencePipeline.java:127)
~[?:?]
at
java.util.stream.AbstractPipeline.copyIntoWithCancel(AbstractPipeline.java:502)
~[?:?]
at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:488)
~[?:?]
at
java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:474)
~[?:?]
at java.util.stream.FindOps$FindOp.evaluateSequential(FindOps.java:150)
~[?:?]
at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234)
~[?:?]
at
java.util.stream.ReferencePipeline.findFirst(ReferencePipeline.java:543) ~[?:?]
at
org.apache.amoro.CommonUnifiedCatalog.loadTable(CommonUnifiedCatalog.java:133)
~[amoro-common-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.server.catalog.ExternalCatalog.lambda$loadTable$11(ExternalCatalog.java:129)
~[amoro-ams-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at org.apache.amoro.table.TableMetaStore.call(TableMetaStore.java:268)
~[amoro-common-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.table.TableMetaStore.lambda$doAs$0(TableMetaStore.java:243)
~[amoro-common-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at java.security.AccessController.doPrivileged(Native Method) ~[?:?]
at javax.security.auth.Subject.doAs(Subject.java:361) ~[?:?]
at
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1918)
~[hadoop-common-2.10.2.jar:?]
at org.apache.amoro.table.TableMetaStore.doAs(TableMetaStore.java:243)
~[amoro-common-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.server.catalog.ExternalCatalog.doAs(ExternalCatalog.java:153)
~[amoro-ams-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.server.catalog.ExternalCatalog.loadTable(ExternalCatalog.java:129)
~[amoro-ams-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.server.catalog.DefaultCatalogManager.loadTable(DefaultCatalogManager.java:208)
~[amoro-ams-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.server.table.DefaultTableService.loadTable(DefaultTableService.java:216)
~[amoro-ams-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.server.scheduler.PeriodicTableScheduler.loadTable(PeriodicTableScheduler.java:162)
~[amoro-ams-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.server.scheduler.inline.TableRuntimeRefreshExecutor.execute(TableRuntimeRefreshExecutor.java:99)
~[amoro-ams-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.server.scheduler.PeriodicTableScheduler.executeTask(PeriodicTableScheduler.java:98)
~[amoro-ams-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
org.apache.amoro.server.scheduler.PeriodicTableScheduler.lambda$scheduleIfNecessary$2(PeriodicTableScheduler.java:110)
~[amoro-ams-0.9-SNAPSHOT.jar:0.9-SNAPSHOT]
at
java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) [?:?]
at java.util.concurrent.FutureTask.run(FutureTask.java:264) [?:?]
at
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304)
[?:?]
at
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
[?:?]
at
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
[?:?]
at java.lang.Thread.run(Thread.java:829) [?:?]
Caused by: org.apache.thrift.TApplicationException: Invalid method name:
'get_table_req'
at
org.apache.thrift.TApplicationException.read(TApplicationException.java:111)
~[libthrift-0.9.3.jar:0.9.3]
at org.apache.thrift.TServiceClient.receiveBase(TServiceClient.java:79)
~[libthrift-0.9.3.jar:0.9.3]
at
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Client.recv_get_table_req(ThriftHiveMetastore.java:1567)
~[hive-metastore-2.3.8.jar:2.3.8]
at
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Client.get_table_req(ThriftHiveMetastore.java:1554)
~[hive-metastore-2.3.8.jar:2.3.8]
at
org.apache.hadoop.hive.metastore.HiveMetaStoreClient.getTable(HiveMetaStoreClient.java:1350)
~[hive-metastore-2.3.8.jar:2.3.8]
at jdk.internal.reflect.GeneratedMethodAccessor91.invoke(Unknown Source)
~[?:?]
at
jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
~[?:?]
at java.lang.reflect.Method.invoke(Method.java:566) ~[?:?]
at
org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.invoke(RetryingMetaStoreClient.java:169)
~[hive-metastore-2.3.8.jar:2.3.8]
at com.sun.proxy.$Proxy56.getTable(Unknown Source) ~[?:?]
at
org.apache.iceberg.hive.HiveTableOperations.lambda$doRefresh$0(HiveTableOperations.java:147)
~[iceberg-hive-metastore-1.6.1.jar:?]
at org.apache.iceberg.ClientPoolImpl.run(ClientPoolImpl.java:72)
~[iceberg-core-1.6.1.jar:?]
at org.apache.iceberg.ClientPoolImpl.run(ClientPoolImpl.java:65)
~[iceberg-core-1.6.1.jar:?]
at
org.apache.iceberg.hive.CachedClientPool.run(CachedClientPool.java:122)
~[iceberg-hive-metastore-1.6.1.jar:?]
at
org.apache.iceberg.hive.HiveTableOperations.doRefresh(HiveTableOperations.java:147)
~[iceberg-hive-metastore-1.6.1.jar:?]
... 52 more
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]