Copilot commented on code in PR #6441:
URL: https://github.com/apache/hive/pull/6441#discussion_r3180582707


##########
standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestHMSCachingCatalogStats.java:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.rest;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.net.URI;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.ServletSecurity.AuthType;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.hive.HiveCatalog;
+import org.apache.iceberg.rest.extension.HiveRESTCatalogServerExtension;
+import org.apache.iceberg.rest.responses.HMSCacheStatsResponse;
+import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+/**
+ * Integration tests that verify the {@link HMSCachingCatalog} 
cache-statistics counters
+ * (hit, miss, load, hit-rate) are updated correctly and exposed accurately 
via the
+ * {@code GET v1/cache/stats} REST endpoint.
+ *
+ * <p>The server is started with {@link AuthType#NONE} so the tests focus 
purely on
+ * caching behaviour without any authentication noise.
+ */
+@Category(MetastoreCheckinTest.class)
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class TestHMSCachingCatalogStats {
+
+  /** 5 minutes expressed in milliseconds – the value injected into {@code 
ICEBERG_CATALOG_CACHE_EXPIRY}. */
+  private static final long CACHE_EXPIRY_MS = 5 * 60 * 1_000L;
+
+  @RegisterExtension
+  private static final HiveRESTCatalogServerExtension REST_CATALOG_EXTENSION =
+      HiveRESTCatalogServerExtension.builder(AuthType.NONE)
+          // Without a positive expiry the HMSCatalogFactory skips 
HMSCachingCatalog entirely.
+          .configure(
+              MetastoreConf.ConfVars.ICEBERG_CATALOG_CACHE_EXPIRY.getVarname(),
+              String.valueOf(CACHE_EXPIRY_MS))
+          .configure("hive.in.test", "true")
+          .build();
+
+  private RESTCatalog catalog;
+  private HiveCatalog serverCatalog;
+
+  @BeforeAll
+  void setupAll() {
+    catalog = RCKUtils.initCatalogClient(clientConfig());
+    serverCatalog = 
HMSCachingCatalog.getLatestCache(HMSCachingCatalog::getCatalog);
+  }
+
+  /** Remove any namespace/table created by the test so each run starts clean. 
*/
+  @AfterEach
+  void cleanup() {
+    RCKUtils.purgeCatalogTestEntries(catalog);
+  }
+
+  // 
---------------------------------------------------------------------------
+  // helpers
+  // 
---------------------------------------------------------------------------
+
+  private java.util.Map<String, String> clientConfig() {
+    return java.util.Map.of("uri", REST_CATALOG_EXTENSION.getRestEndpoint());
+  }
+
+  /**
+   * Calls the {@code GET v1/cache/stats} endpoint directly over HTTP and 
returns
+   * the deserialised {@link HMSCacheStatsResponse}.
+   */
+  private HMSCacheStatsResponse fetchCacheStats() throws Exception {
+    String statsUrl = REST_CATALOG_EXTENSION.getRestEndpoint() + 
"/v1/cache/stats";
+    HttpRequest request = HttpRequest.newBuilder()
+        .uri(URI.create(statsUrl))
+        .GET()
+        .build();
+    HttpResponse<String> response;
+    try (HttpClient client = HttpClient.newHttpClient()) {
+      response = client.send(request, HttpResponse.BodyHandlers.ofString());
+    }

Review Comment:
   `java.net.http.HttpClient` implements `AutoCloseable` only as of JDK 21, so this try-with-resources block compiles only on a JDK 21+ baseline. Since this PR already relies on other JDK 21 APIs (e.g., `List.getFirst()` in `MetadataLocator`), this likely compiles as written — but if the module must still build on an earlier JDK, instantiate the client normally (or reuse a static/shared instance) and remove the try-with-resources wrapper. Please confirm the module's target JDK.
   



##########
standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/hive/MetadataLocator.java:
##########
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.iceberg.hive;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.GetProjectionsSpec;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import 
org.apache.hadoop.hive.metastore.client.builder.GetTableProjectionsSpecBuilder;
+import org.apache.iceberg.BaseMetastoreTableOperations;
+import org.apache.iceberg.ClientPool;
+import org.apache.iceberg.MetadataTableType;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.exceptions.NoSuchTableException;
+import org.apache.thrift.TException;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Fetches the location of a given metadata table.
+ * <p>Since the location mutates with each transaction, this allows 
determining if a cached version of the
+ * table is the latest known in the HMS database.</p>
+ */
+public class MetadataLocator {
+  private static final org.slf4j.Logger LOGGER = 
org.slf4j.LoggerFactory.getLogger(MetadataLocator.class);
+  static final GetProjectionsSpec PARAM_SPEC = new 
GetTableProjectionsSpecBuilder()

Review Comment:
   `PARAM_SPEC` is package-private; with the repo's checkstyle 
`VisibilityModifier` enabled, this is likely to be flagged. Make it `private 
static final` (or otherwise justify/annotate) to comply with visibility rules.
   



##########
standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java:
##########
@@ -7,91 +7,362 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- *   http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 
 package org.apache.iceberg.rest;
 
 import com.github.benmanes.caffeine.cache.Ticker;
+
+import java.lang.ref.SoftReference;
+import java.util.Collections;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.iceberg.BaseMetadataTable;
 import org.apache.iceberg.CachingCatalog;
+import org.apache.iceberg.HasTableOperations;
+import org.apache.iceberg.MetadataTableType;
+import org.apache.iceberg.MetadataTableUtils;
 import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableOperations;
 import org.apache.iceberg.catalog.Catalog;
 import org.apache.iceberg.catalog.Namespace;
 import org.apache.iceberg.catalog.SupportsNamespaces;
 import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.catalog.ViewCatalog;
 import org.apache.iceberg.exceptions.NamespaceNotEmptyException;
 import org.apache.iceberg.exceptions.NoSuchNamespaceException;
+import org.apache.iceberg.exceptions.NoSuchTableException;
 import org.apache.iceberg.hive.HiveCatalog;
+import org.apache.iceberg.hive.MetadataLocator;
 import org.apache.iceberg.view.View;
 import org.apache.iceberg.view.ViewBuilder;
-
+import org.jetbrains.annotations.TestOnly;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Class that wraps an Iceberg Catalog to cache tables.
  */
 public class HMSCachingCatalog extends CachingCatalog implements 
SupportsNamespaces, ViewCatalog {
+  protected static final Logger LOG = 
LoggerFactory.getLogger(HMSCachingCatalog.class);
+
+  @TestOnly
+  private static SoftReference<HMSCachingCatalog> cacheRef = new 
SoftReference<>(null);
+
+  @TestOnly @SuppressWarnings("unchecked")
+  public static <C extends Catalog> C  
getLatestCache(Function<HMSCachingCatalog, C> extractor) {
+    HMSCachingCatalog cache = cacheRef.get();
+    if (cache == null) {
+      return null;
+    }
+    return extractor == null ? (C) cache : extractor.apply(cache);
+  }
+
+  @TestOnly
+  public HiveCatalog getCatalog() {
+    return hiveCatalog;
+  }
+
+  // The underlying HiveCatalog instance.
   private final HiveCatalog hiveCatalog;
-  
-  public HMSCachingCatalog(HiveCatalog catalog, long expiration) {
-    super(catalog, true, expiration, Ticker.systemTicker());
+  // Duplicate because CachingCatalog doesn't expose the case sensitivity of 
the underlying catalog,
+  // which is needed for canonicalizing identifiers before caching.
+  private final boolean caseSensitive;
+  // The locator.
+  private final MetadataLocator metadataLocator;
+  // An L1 small latency cache.
+  // This is used to cache the last cached time for each table identifier,
+  // so that we can skip location check for repeated access to the same table 
within a short period of time,
+  // which can significantly reduce the latency for repeated access to the 
same table.
+  private final Map<TableIdentifier, Long> l1Cache;
+  // The TTL for L1 cache (3s).
+  private final int L1TTL_MS;
+  // The L1 cache size.
+  private final int L1_CACHE_SIZE;
+

Review Comment:
   Instance fields `L1TTL_MS` and `L1_CACHE_SIZE` are named like constants, but 
they are non-static members. This likely violates the project's checkstyle 
`MemberName` rule; consider renaming to lowerCamelCase (e.g., `l1TtlMs`, 
`l1CacheSize`).



##########
standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java:
##########
@@ -7,91 +7,362 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- *   http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 
 package org.apache.iceberg.rest;
 
 import com.github.benmanes.caffeine.cache.Ticker;
+
+import java.lang.ref.SoftReference;
+import java.util.Collections;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.iceberg.BaseMetadataTable;
 import org.apache.iceberg.CachingCatalog;
+import org.apache.iceberg.HasTableOperations;
+import org.apache.iceberg.MetadataTableType;
+import org.apache.iceberg.MetadataTableUtils;
 import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableOperations;
 import org.apache.iceberg.catalog.Catalog;
 import org.apache.iceberg.catalog.Namespace;
 import org.apache.iceberg.catalog.SupportsNamespaces;
 import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.catalog.ViewCatalog;
 import org.apache.iceberg.exceptions.NamespaceNotEmptyException;
 import org.apache.iceberg.exceptions.NoSuchNamespaceException;
+import org.apache.iceberg.exceptions.NoSuchTableException;

Review Comment:
   `NoSuchTableException` is imported but not used in this class. Please remove 
the unused import to keep the file clean and avoid checkstyle failures.
   



##########
standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestHMSCachingCatalogStats.java:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.rest;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.net.URI;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.ServletSecurity.AuthType;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.hive.HiveCatalog;
+import org.apache.iceberg.rest.extension.HiveRESTCatalogServerExtension;

Review Comment:
   `org.apache.iceberg.catalog.Catalog` is imported but not used in this test
class. Please remove the unused import — it is not a compilation error, but it will
be flagged by the project's checkstyle rules.



##########
standalone-metastore/metastore-rest-catalog/src/test/java/org/apache/iceberg/rest/TestHMSCachingCatalogStats.java:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.rest;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.net.URI;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.ServletSecurity.AuthType;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.hive.HiveCatalog;
+import org.apache.iceberg.rest.extension.HiveRESTCatalogServerExtension;
+import org.apache.iceberg.rest.responses.HMSCacheStatsResponse;
+import org.junit.experimental.categories.Category;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+/**
+ * Integration tests that verify the {@link HMSCachingCatalog} 
cache-statistics counters
+ * (hit, miss, load, hit-rate) are updated correctly and exposed accurately 
via the
+ * {@code GET v1/cache/stats} REST endpoint.
+ *
+ * <p>The server is started with {@link AuthType#NONE} so the tests focus 
purely on
+ * caching behaviour without any authentication noise.
+ */
+@Category(MetastoreCheckinTest.class)
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class TestHMSCachingCatalogStats {
+
+  /** 5 minutes expressed in milliseconds – the value injected into {@code 
ICEBERG_CATALOG_CACHE_EXPIRY}. */
+  private static final long CACHE_EXPIRY_MS = 5 * 60 * 1_000L;
+
+  @RegisterExtension
+  private static final HiveRESTCatalogServerExtension REST_CATALOG_EXTENSION =
+      HiveRESTCatalogServerExtension.builder(AuthType.NONE)
+          // Without a positive expiry the HMSCatalogFactory skips 
HMSCachingCatalog entirely.
+          .configure(
+              MetastoreConf.ConfVars.ICEBERG_CATALOG_CACHE_EXPIRY.getVarname(),
+              String.valueOf(CACHE_EXPIRY_MS))
+          .configure("hive.in.test", "true")
+          .build();
+
+  private RESTCatalog catalog;
+  private HiveCatalog serverCatalog;
+
+  @BeforeAll
+  void setupAll() {
+    catalog = RCKUtils.initCatalogClient(clientConfig());
+    serverCatalog = 
HMSCachingCatalog.getLatestCache(HMSCachingCatalog::getCatalog);

Review Comment:
   `serverCatalog` is obtained via `HMSCachingCatalog.getLatestCache(...)`, 
which can return null (e.g., if the reference is cleared). The test later 
dereferences `serverCatalog` unconditionally, which can lead to NPE/flaky 
failures. Add an assertion/assumption that `serverCatalog` is non-null (or 
adjust the cache reference mechanism used for tests).
   



##########
standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/responses/HMSCacheStatsResponse.java:
##########
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.iceberg.rest.responses;
+
+import org.apache.iceberg.rest.RESTResponse;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.TreeMap;
+
+public record HMSCacheStatsResponse(Map<String, Number> stats) implements 
RESTResponse {
+  public HMSCacheStatsResponse(Map<String, Number> stats) {
+    this.stats = Collections.unmodifiableMap(new TreeMap<>(stats));

Review Comment:
   `HMSCacheStatsResponse`'s canonical constructor assumes `stats` is non-null 
and will throw an NPE if the JSON omits the field or sets it to null. Consider 
defaulting null to an empty map (and/or validating in `validate()`) to make the 
response type robust to deserialization edge cases.
   



##########
standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/hive/MetadataLocator.java:
##########
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.iceberg.hive;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.GetProjectionsSpec;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import 
org.apache.hadoop.hive.metastore.client.builder.GetTableProjectionsSpecBuilder;
+import org.apache.iceberg.BaseMetastoreTableOperations;
+import org.apache.iceberg.ClientPool;
+import org.apache.iceberg.MetadataTableType;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.exceptions.NoSuchTableException;
+import org.apache.thrift.TException;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Fetches the location of a given metadata table.
+ * <p>Since the location mutates with each transaction, this allows 
determining if a cached version of the
+ * table is the latest known in the HMS database.</p>
+ */
+public class MetadataLocator {
+  private static final org.slf4j.Logger LOGGER = 
org.slf4j.LoggerFactory.getLogger(MetadataLocator.class);
+  static final GetProjectionsSpec PARAM_SPEC = new 
GetTableProjectionsSpecBuilder()
+      .includeParameters()  // only fetches table.parameters
+      .build();
+  private final HiveCatalog catalog;
+
+  public MetadataLocator(HiveCatalog catalog) {
+    this.catalog = catalog;
+  }
+
+  public HiveCatalog getCatalog() {
+    return catalog;
+  }
+
+  /**
+   * Returns the location of the metadata table identified by the given 
identifier, or null if the table does not exist or is
+   * not a metadata table.
+   * <p>This uses the Thrift API to fetch the table parameters, which is more 
efficient than fetching the entire table object.</p>
+   * @param  identifier the identifier of the metadata table to fetch the 
location for
+   * @return the location of the metadata table, or null if the table does not 
exist or is not a metadata table
+   */
+  public String getLocation(TableIdentifier identifier) {
+    final ClientPool<IMetaStoreClient, TException> clients = 
catalog.clientPool();
+    final String catName = catalog.name();
+    final TableIdentifier baseTableIdentifier;
+    if (!catalog.isValidIdentifier(identifier)) {
+      if (!isValidMetadataIdentifier(identifier)) {
+        return null;
+      } else {
+        baseTableIdentifier = 
TableIdentifier.of(identifier.namespace().levels());
+      }
+    } else {
+      baseTableIdentifier = identifier;
+    }
+    String database = baseTableIdentifier.namespace().level(0);
+    String tableName = baseTableIdentifier.name();
+    try {
+      List<Table> tables = clients.run(
+          client -> client.getTables(catName, database, 
Collections.singletonList(tableName), PARAM_SPEC)
+      );
+      return tables == null || tables.isEmpty()
+          ? null
+          : 
tables.getFirst().getParameters().get(BaseMetastoreTableOperations.METADATA_LOCATION_PROP);
+    } catch (NoSuchTableException e) {
+      LOGGER.debug("Table {} not found: {}", baseTableIdentifier, 
e.getMessage());
+      throw e;
+    } catch (NoSuchObjectException e) {
+      throw new NoSuchTableException("Table {} not found: {}", 
baseTableIdentifier, e.getMessage());

Review Comment:
   The `NoSuchTableException` message template uses `{}` placeholders, but 
Iceberg exceptions typically use `String.format`-style formatting. As written, 
the message is likely to include literal `{}` instead of the table name. Use 
the correct placeholder style (e.g., `%s`) or build the message explicitly.
   



##########
standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java:
##########
@@ -7,91 +7,362 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- *   http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 
 package org.apache.iceberg.rest;
 
 import com.github.benmanes.caffeine.cache.Ticker;
+
+import java.lang.ref.SoftReference;
+import java.util.Collections;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.iceberg.BaseMetadataTable;
 import org.apache.iceberg.CachingCatalog;
+import org.apache.iceberg.HasTableOperations;
+import org.apache.iceberg.MetadataTableType;
+import org.apache.iceberg.MetadataTableUtils;
 import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableOperations;
 import org.apache.iceberg.catalog.Catalog;
 import org.apache.iceberg.catalog.Namespace;
 import org.apache.iceberg.catalog.SupportsNamespaces;
 import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.catalog.ViewCatalog;
 import org.apache.iceberg.exceptions.NamespaceNotEmptyException;
 import org.apache.iceberg.exceptions.NoSuchNamespaceException;
+import org.apache.iceberg.exceptions.NoSuchTableException;
 import org.apache.iceberg.hive.HiveCatalog;
+import org.apache.iceberg.hive.MetadataLocator;
 import org.apache.iceberg.view.View;
 import org.apache.iceberg.view.ViewBuilder;
-
+import org.jetbrains.annotations.TestOnly;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Class that wraps an Iceberg Catalog to cache tables.
  */
 public class HMSCachingCatalog extends CachingCatalog implements 
SupportsNamespaces, ViewCatalog {
+  protected static final Logger LOG = 
LoggerFactory.getLogger(HMSCachingCatalog.class);
+
+  @TestOnly
+  private static SoftReference<HMSCachingCatalog> cacheRef = new 
SoftReference<>(null);
+
+  @TestOnly @SuppressWarnings("unchecked")
+  public static <C extends Catalog> C  
getLatestCache(Function<HMSCachingCatalog, C> extractor) {
+    HMSCachingCatalog cache = cacheRef.get();
+    if (cache == null) {
+      return null;
+    }
+    return extractor == null ? (C) cache : extractor.apply(cache);
+  }
+
+  @TestOnly
+  public HiveCatalog getCatalog() {
+    return hiveCatalog;
+  }
+
+  // The underlying HiveCatalog instance.
   private final HiveCatalog hiveCatalog;
-  
-  public HMSCachingCatalog(HiveCatalog catalog, long expiration) {
-    super(catalog, true, expiration, Ticker.systemTicker());
+  // Duplicate because CachingCatalog doesn't expose the case sensitivity of 
the underlying catalog,
+  // which is needed for canonicalizing identifiers before caching.
+  private final boolean caseSensitive;
+  // The locator.
+  private final MetadataLocator metadataLocator;
+  // An L1 small latency cache.
+  // This is used to cache the last cached time for each table identifier,
+  // so that we can skip location check for repeated access to the same table 
within a short period of time,
+  // which can significantly reduce the latency for repeated access to the 
same table.
+  private final Map<TableIdentifier, Long> l1Cache;
+  // The TTL for L1 cache (3s).
+  private final int L1TTL_MS;
+  // The L1 cache size.
+  private final int L1_CACHE_SIZE;
+
+  // Metrics counters.
+  private final AtomicLong cacheHitCount = new AtomicLong(0);
+  private final AtomicLong cacheMissCount = new AtomicLong(0);
+  private final AtomicLong cacheLoadCount = new AtomicLong(0);
+  private final AtomicLong cacheInvalidateCount = new AtomicLong(0);
+  private final AtomicLong cacheMetaLoadCount = new AtomicLong(0);
+
+  public HMSCachingCatalog(HiveCatalog catalog, long expirationMs) {
+    this(catalog, expirationMs, /*caseSensitive*/ true);
+  }
+
+  public HMSCachingCatalog(HiveCatalog catalog, long expirationMs, boolean 
caseSensitive) {
+    super(catalog, caseSensitive, expirationMs, Ticker.systemTicker());
     this.hiveCatalog = catalog;
+    this.caseSensitive = caseSensitive;
+    this.metadataLocator = new MetadataLocator(catalog);
+    Configuration conf = catalog.getConf();
+    if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST)) {
+      // Only keep a reference to the latest cache for testing purpose, so 
that tests can manipulate the catalog.
+      cacheRef = new SoftReference<>(this);
+    }
+    int l1size = conf.getInt("hms.caching.catalog.l1.cache.size", 32);
+    int l1ttl = conf.getInt("hms.caching.catalog.l1.cache.ttl", 3_000);
+    if (l1size > 0 && l1ttl > 0) {
+       l1Cache = Collections.synchronizedMap(new 
LinkedHashMap<TableIdentifier, Long>() {
+        @Override
+        protected boolean removeEldestEntry(Map.Entry<TableIdentifier, Long> 
eldest) {
+          return size() > L1_CACHE_SIZE;
+        }
+      });
+       L1TTL_MS = l1ttl;
+       L1_CACHE_SIZE = l1size;
+    } else {
+       l1Cache = Collections.emptyMap();
+       L1TTL_MS = 0;
+       L1_CACHE_SIZE = 0;
+    }
+  }
+
+  /**
+   * Callback when cache invalidates the entry for a given table identifier.
+   *
+   * @param tid the table identifier to invalidate
+   */
+  protected void onCacheInvalidate(TableIdentifier tid) {
+    long count = cacheInvalidateCount.incrementAndGet();
+    LOG.debug("Cache invalidate {}: {}", tid, count);
+  }
+
+  /**
+   * Callback when cache loads a table for a given table identifier.
+   *
+   * @param tid the table identifier
+   */
+  protected void onCacheLoad(TableIdentifier tid) {
+    long count = cacheLoadCount.incrementAndGet();
+    LOG.debug("Cache load {}: {}", tid, count);
+  }
+
+  /**
+   * Callback when cache hit for a given table identifier.
+   *
+   * @param tid the table identifier
+   */
+  protected void onCacheHit(TableIdentifier tid) {
+    long count = cacheHitCount.incrementAndGet();
+    LOG.debug("Cache hit {} : {}", tid, count);
   }
 
+  /**
+   * Callback when cache miss occurs for a given table identifier.
+   *
+   * @param tid the table identifier
+   */
+  protected void onCacheMiss(TableIdentifier tid) {
+    long count = cacheMissCount.incrementAndGet();
+    LOG.debug("Cache miss {}: {}", tid, count);
+  }
+
+  /**
+   * Callback when cache loads a metadata table for a given table identifier.
+   *
+   * @param tid the table identifier
+   */
+  protected void onCacheMetaLoad(TableIdentifier tid) {
+    long count = cacheMetaLoadCount.incrementAndGet();
+    LOG.debug("Cache meta-load {}: {}", tid, count);
+  }
+
+  // Getter methods for accessing metrics
+  public long getCacheHitCount() {
+    return cacheHitCount.get();
+  }
+
+  public long getCacheMissCount() {
+    return cacheMissCount.get();
+  }
+
+  public long getCacheLoadCount() {
+    return cacheLoadCount.get();
+  }
+
+  public long getCacheInvalidateCount() {
+    return cacheInvalidateCount.get();
+  }
+
+  public long getCacheMetaLoadCount() {
+    return cacheMetaLoadCount.get();
+  }
+
+  public double getCacheHitRate() {
+    long hits = cacheHitCount.get();
+    long total = hits + cacheMissCount.get();
+    return total == 0 ? 0.0 : (double) hits / total;
+  }
+
+  /**
+   * Generates a map of this cache's performance metrics, including hit count,
+   * miss count, load count, invalidate count, meta-load count, and hit rate.
+   * This can be used for monitoring and debugging purposes to understand the 
effectiveness of the cache.
+   * @return a map of cache performance metrics
+   */
+  public Map<String, Number> cacheStats() {
+    return Map.of(
+            "hit", getCacheHitCount(),
+            "miss", getCacheMissCount(),
+            "load", getCacheLoadCount(),
+            "invalidate", getCacheInvalidateCount(),
+            "metaload", getCacheMetaLoadCount(),
+            "hit-rate", getCacheHitRate()
+    );
+  }
+
+
   @Override
-  public Catalog.TableBuilder buildTable(TableIdentifier identifier, Schema 
schema) {
-    return hiveCatalog.buildTable(identifier, schema);
+  public void createNamespace(Namespace namespace, Map<String, String> map) {
+    hiveCatalog.createNamespace(namespace, map);
   }
 
   @Override
-  public void createNamespace(Namespace nmspc, Map<String, String> map) {
-    hiveCatalog.createNamespace(nmspc, map);
+  public List<Namespace> listNamespaces(Namespace namespace) throws 
NoSuchNamespaceException {
+    return hiveCatalog.listNamespaces(namespace);
+  }
+
+  /**
+   * Canonicalizes the given table identifier based on the case sensitivity of 
the underlying catalog.
+   * Copied from CachingCatalog that exposes it as private.
+   * @param tableIdentifier the table identifier to canonicalize
+   * @return the canonicalized table identifier
+   */
+  private TableIdentifier canonicalizeIdentifier(TableIdentifier 
tableIdentifier) {
+    return this.caseSensitive ? tableIdentifier : 
tableIdentifier.toLowerCase();
   }
 
   @Override
-  public List<Namespace> listNamespaces(Namespace nmspc) throws 
NoSuchNamespaceException {
-    return hiveCatalog.listNamespaces(nmspc);
+  public void invalidateTable(TableIdentifier ident) {
+    super.invalidateTable(ident);
+    l1Cache.remove(ident);
+  }
+
+  @Override
+  public Table loadTable(final TableIdentifier identifier) {
+    final TableIdentifier canonicalized = canonicalizeIdentifier(identifier);
+    final Table cachedTable = tableCache.getIfPresent(canonicalized);
+    long now = System.currentTimeMillis();
+    if (cachedTable != null) {
+      // Determine if L1 cache is valid based on the last cached time and the 
TTL.
+      // If the table is in L1 cache, we can skip the location check and 
return the cached table directly,
+      // which can significantly reduce the latency for repeated access to the 
same table.
+      Long lastCached = l1Cache.get(canonicalized);
+      if (lastCached != null) {
+        if (now - lastCached < L1TTL_MS) {
+          LOG.debug("Table {} is in L1 cache, returning cached table", 
canonicalized);
+          onCacheHit(canonicalized);
+          return cachedTable;
+        } else {
+          l1Cache.remove(canonicalized);
+        }
+      }
+      // If the table is no longer in L1 cache, we need to check the location.
+      final String location = metadataLocator.getLocation(canonicalized);
+      if (location == null) {
+        LOG.debug("Table {} has no location, returning cached table without 
location", canonicalized);
+        onCacheHit(canonicalized);
+        l1Cache.put(canonicalized, now);
+        return cachedTable;
+      }
+      String cachedLocation = cachedTable instanceof HasTableOperations 
tableOps
+              ? tableOps.operations().current().metadataFileLocation()
+              : null;
+      if (location.equals(cachedLocation)) {
+        onCacheHit(canonicalized);
+        l1Cache.put(canonicalized, now);
+        return cachedTable;
+      } else {
+        LOG.debug("Invalidate table {}, cached {} != actual {}", 
canonicalized, cachedLocation, location);
+        // Invalidate the cached table if the location is different
+        invalidateTable(canonicalized);
+        onCacheInvalidate(canonicalized);
+      }
+    } else {
+      onCacheMiss(canonicalized);
+    }
+    // The following code is copied from CachingCatalog.loadTable(), but with 
additional handling for L1 cache and stats.
+    final Table table = tableCache.get(canonicalized, 
this::loadTableWithoutCache);
+    if (table instanceof BaseMetadataTable) {
+      // Cache underlying table: there must be a table named by the namespace 
(?)
+      TableIdentifier originTableIdentifier = 
TableIdentifier.of(canonicalized.namespace().levels());
+      Table originTable = tableCache.get(originTableIdentifier, 
this::loadTableWithoutCache);
+      // Share TableOperations instance of origin table for all metadata 
tables, so that metadata
+      // table instances are refreshed as well when origin table instance is 
refreshed.
+      if (originTable instanceof HasTableOperations tableOps) {
+        TableOperations ops = tableOps.operations();
+        MetadataTableType type = MetadataTableType.from(canonicalized.name());
+        // Defensive: CachingCatalog doesn't perform this check
+        if (type != null) {
+          Table metadataTable = 
MetadataTableUtils.createMetadataTableInstance(ops, hiveCatalog.name(), 
originTableIdentifier, canonicalized, type);
+          tableCache.put(canonicalized, metadataTable);
+          l1Cache.put(canonicalized, now);
+          onCacheMetaLoad(canonicalized);
+          LOG.debug("Loaded metadata table: {} for origin table: {}", 
canonicalized, originTableIdentifier);
+          // Return the metadata table instead of the original table
+          return metadataTable;
+        }
+      }
+    }
+    onCacheLoad(canonicalized);
+    return table;
+  }

Review Comment:
   The L1 cache is intended to skip the HMS location check for recently 
accessed tables, but after a cache miss and successful load, the code does not 
record `canonicalized` in `l1Cache`. This means the very next access will still 
perform an HMS lookup even though the table was just loaded. Consider adding 
`l1Cache.put(canonicalized, now)` (or using the actual post-load time) after a 
successful load.



##########
standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/hive/MetadataLocator.java:
##########
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.iceberg.hive;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.GetProjectionsSpec;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import 
org.apache.hadoop.hive.metastore.client.builder.GetTableProjectionsSpecBuilder;
+import org.apache.iceberg.BaseMetastoreTableOperations;
+import org.apache.iceberg.ClientPool;
+import org.apache.iceberg.MetadataTableType;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.exceptions.NoSuchTableException;
+import org.apache.thrift.TException;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Fetches the location of a given metadata table.
+ * <p>Since the location mutates with each transaction, this allows 
determining if a cached version of the
+ * table is the latest known in the HMS database.</p>
+ */
+public class MetadataLocator {
+  private static final org.slf4j.Logger LOGGER = 
org.slf4j.LoggerFactory.getLogger(MetadataLocator.class);
+  static final GetProjectionsSpec PARAM_SPEC = new 
GetTableProjectionsSpecBuilder()
+      .includeParameters()  // only fetches table.parameters
+      .build();
+  private final HiveCatalog catalog;
+
+  public MetadataLocator(HiveCatalog catalog) {
+    this.catalog = catalog;
+  }
+
+  public HiveCatalog getCatalog() {
+    return catalog;
+  }
+
+  /**
+   * Returns the location of the metadata table identified by the given 
identifier, or null if the table does not exist or is
+   * not a metadata table.
+   * <p>This uses the Thrift API to fetch the table parameters, which is more 
efficient than fetching the entire table object.</p>
+   * @param  identifier the identifier of the metadata table to fetch the 
location for
+   * @return the location of the metadata table, or null if the table does not 
exist or is not a metadata table

Review Comment:
   The Javadoc for `getLocation` says it returns null if the table does not 
exist, but the implementation rethrows `NoSuchTableException` (and also throws 
for `NoSuchObjectException`). Update the Javadoc to reflect the thrown 
exceptions or change the implementation to consistently return null for 
non-existent tables.
   



##########
standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCachingCatalog.java:
##########
@@ -7,91 +7,362 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- *   http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 
 package org.apache.iceberg.rest;
 
 import com.github.benmanes.caffeine.cache.Ticker;
+
+import java.lang.ref.SoftReference;
+import java.util.Collections;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.iceberg.BaseMetadataTable;
 import org.apache.iceberg.CachingCatalog;
+import org.apache.iceberg.HasTableOperations;
+import org.apache.iceberg.MetadataTableType;
+import org.apache.iceberg.MetadataTableUtils;
 import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableOperations;
 import org.apache.iceberg.catalog.Catalog;
 import org.apache.iceberg.catalog.Namespace;
 import org.apache.iceberg.catalog.SupportsNamespaces;
 import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.catalog.ViewCatalog;
 import org.apache.iceberg.exceptions.NamespaceNotEmptyException;
 import org.apache.iceberg.exceptions.NoSuchNamespaceException;
+import org.apache.iceberg.exceptions.NoSuchTableException;
 import org.apache.iceberg.hive.HiveCatalog;
+import org.apache.iceberg.hive.MetadataLocator;
 import org.apache.iceberg.view.View;
 import org.apache.iceberg.view.ViewBuilder;
-
+import org.jetbrains.annotations.TestOnly;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Class that wraps an Iceberg Catalog to cache tables.
  */
 public class HMSCachingCatalog extends CachingCatalog implements 
SupportsNamespaces, ViewCatalog {
+  protected static final Logger LOG = 
LoggerFactory.getLogger(HMSCachingCatalog.class);
+
+  @TestOnly
+  private static SoftReference<HMSCachingCatalog> cacheRef = new 
SoftReference<>(null);
+
+  @TestOnly @SuppressWarnings("unchecked")
+  public static <C extends Catalog> C  
getLatestCache(Function<HMSCachingCatalog, C> extractor) {
+    HMSCachingCatalog cache = cacheRef.get();
+    if (cache == null) {
+      return null;
+    }
+    return extractor == null ? (C) cache : extractor.apply(cache);
+  }

Review Comment:
   Using a `SoftReference` for `cacheRef` can cause it to be cleared under 
memory pressure, making `getLatestCache(...)` return null and potentially 
causing test flakiness (the new stats test depends on this). Since this 
reference is only populated when `HIVE_IN_TEST` is true, consider using a 
strong reference (or an `AtomicReference`) and explicitly clearing it when 
appropriate.



##########
standalone-metastore/metastore-rest-catalog/src/main/java/org/apache/iceberg/rest/HMSCatalogAdapter.java:
##########
@@ -407,84 +417,35 @@ private static void commitTransaction(Catalog catalog, 
CommitTransactionRequest
   }
   
   @SuppressWarnings({"MethodLength", "unchecked"})
-  private <T extends RESTResponse> T handleRequest(
-      Route route, Map<String, String> vars, Object body) {
-    switch (route) {
-      case CONFIG:
-        return (T) config();
-
-      case LIST_NAMESPACES:
-        return (T) listNamespaces(vars);
-
-      case CREATE_NAMESPACE:
-        return (T) createNamespace(body);
-
-      case NAMESPACE_EXISTS:
-        return (T) namespaceExists(vars);
-
-      case LOAD_NAMESPACE:
-        return (T) loadNamespace(vars);
-
-      case DROP_NAMESPACE:
-        return (T) dropNamespace(vars);
-
-      case UPDATE_NAMESPACE:
-        return (T) updateNamespace(vars, body);
-
-      case LIST_TABLES:
-        return (T) listTables(vars);
-
-      case CREATE_TABLE:
-        return (T) createTable(vars, body);
-
-      case DROP_TABLE:
-        return (T) dropTable(vars);
-
-      case TABLE_EXISTS:
-        return (T) tableExists(vars);
-
-      case LOAD_TABLE:
-        return (T) loadTable(vars);
-
-      case REGISTER_TABLE:
-        return (T) registerTable(vars, body);
-
-      case UPDATE_TABLE:
-        return (T) updateTable(vars, body);
-
-      case RENAME_TABLE:
-        return (T) renameTable(body);
-
-      case REPORT_METRICS:
-        return (T) reportMetrics(body);
-
-      case COMMIT_TRANSACTION:
-        return (T) commitTransaction(body);
-        
-      case LIST_VIEWS:
-        return (T) listViews(vars);
-
-      case CREATE_VIEW:
-          return (T) createView(vars, body);
-
-      case VIEW_EXISTS:
-        return (T) viewExists(vars);
-
-      case LOAD_VIEW:
-        return (T) loadView(vars);
-
-      case UPDATE_VIEW:
-        return (T) updateView(vars, body);
-        
-      case RENAME_VIEW:
-        return (T) renameView(body);
-        
-      case DROP_VIEW:
-        return (T) dropView(vars);
-
-      default:
-    }
-    return null;
+  private <T extends RESTResponse> T handleRequest(Route route, Map<String, 
String> vars, Object body) {
+    return switch (route) {
+      case CONFIG -> (T) config();
+      case LIST_NAMESPACES -> (T) listNamespaces(vars);
+      case CREATE_NAMESPACE -> (T) createNamespace(body);
+      case NAMESPACE_EXISTS -> (T) namespaceExists(vars);
+      case LOAD_NAMESPACE -> (T) loadNamespace(vars);
+      case DROP_NAMESPACE -> (T) dropNamespace(vars);
+      case UPDATE_NAMESPACE -> (T) updateNamespace(vars, body);
+      case LIST_TABLES -> (T) listTables(vars);
+      case CREATE_TABLE -> (T) createTable(vars, body);
+      case DROP_TABLE -> (T) dropTable(vars);
+      case TABLE_EXISTS -> (T) tableExists(vars);
+      case LOAD_TABLE -> (T) loadTable(vars);
+      case REGISTER_TABLE -> (T) registerTable(vars, body);
+      case UPDATE_TABLE -> (T) updateTable(vars, body);
+      case RENAME_TABLE -> (T) renameTable(body);
+      case REPORT_METRICS -> (T) reportMetrics(body);
+      case COMMIT_TRANSACTION -> (T) commitTransaction(body);
+      case LIST_VIEWS -> (T) listViews(vars);
+      case CREATE_VIEW -> (T) createView(vars, body);
+      case VIEW_EXISTS -> (T) viewExists(vars);
+      case LOAD_VIEW -> (T) loadView(vars);
+      case UPDATE_VIEW -> (T) updateView(vars, body);
+      case RENAME_VIEW -> (T) renameView(body);
+      case DROP_VIEW -> (T) dropView(vars);
+      case CACHE_STATS -> (T) cacheStats();
+      default -> null;
+    };

Review Comment:
   The `switch` over `Route` includes a `default -> null`, which prevents the 
compiler from enforcing exhaustiveness when new routes are added. Consider 
removing the `default` branch so missing handlers become compile-time errors, 
and throw an explicit exception if an unsupported route is encountered.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to