This is an automated email from the ASF dual-hosted git repository.

singhpk234 pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/iceberg.git


The following commit(s) were added to refs/heads/main by this push:
     new 966003c47b Core: Add server-side implementation of remote scan 
planning to RESTCatalogAdapter (#14480)
966003c47b is described below

commit 966003c47b92a2a6f735a72f36f405f69463f469
Author: Amogh Jahagirdar <[email protected]>
AuthorDate: Wed Nov 12 12:27:16 2025 +0000

    Core: Add server-side implementation of remote scan planning to 
RESTCatalogAdapter (#14480)
    
    * Core: Add server-side implementation of remote scan planning to 
RESTCatalogAdapter
    
    Co-authored-by: Prashant Singh 
<[email protected]>
    
    * Address feedback
    
    * Make initial task sequence number a constant
    
    ---------
    
    Co-authored-by: Prashant Singh 
<[email protected]>
---
 .../org/apache/iceberg/rest/CatalogHandlers.java   | 160 +++++++++++++++++++++
 .../apache/iceberg/rest/InMemoryPlanningState.java | 132 +++++++++++++++++
 .../apache/iceberg/rest/RESTCatalogAdapter.java    |  69 +++++++++
 .../test/java/org/apache/iceberg/rest/Route.java   |  24 +++-
 4 files changed, 384 insertions(+), 1 deletion(-)

diff --git a/core/src/main/java/org/apache/iceberg/rest/CatalogHandlers.java 
b/core/src/main/java/org/apache/iceberg/rest/CatalogHandlers.java
index 83c165ed66..9d1c6d6bbf 100644
--- a/core/src/main/java/org/apache/iceberg/rest/CatalogHandlers.java
+++ b/core/src/main/java/org/apache/iceberg/rest/CatalogHandlers.java
@@ -30,11 +30,17 @@ import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Predicate;
+import java.util.function.ToIntFunction;
 import java.util.stream.Collectors;
 import org.apache.iceberg.BaseMetadataTable;
 import org.apache.iceberg.BaseTable;
 import org.apache.iceberg.BaseTransaction;
+import org.apache.iceberg.FileScanTask;
 import org.apache.iceberg.MetadataUpdate.UpgradeFormatVersion;
 import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.Schema;
@@ -42,6 +48,7 @@ import org.apache.iceberg.SortOrder;
 import org.apache.iceberg.Table;
 import org.apache.iceberg.TableMetadata;
 import org.apache.iceberg.TableOperations;
+import org.apache.iceberg.TableScan;
 import org.apache.iceberg.Transaction;
 import org.apache.iceberg.UpdateRequirement;
 import org.apache.iceberg.catalog.Catalog;
@@ -55,23 +62,29 @@ import 
org.apache.iceberg.exceptions.NoSuchNamespaceException;
 import org.apache.iceberg.exceptions.NoSuchTableException;
 import org.apache.iceberg.exceptions.NoSuchViewException;
 import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
 import org.apache.iceberg.relocated.com.google.common.collect.Maps;
 import org.apache.iceberg.relocated.com.google.common.collect.Sets;
 import org.apache.iceberg.rest.RESTCatalogProperties.SnapshotMode;
 import org.apache.iceberg.rest.requests.CreateNamespaceRequest;
 import org.apache.iceberg.rest.requests.CreateTableRequest;
 import org.apache.iceberg.rest.requests.CreateViewRequest;
+import org.apache.iceberg.rest.requests.FetchScanTasksRequest;
+import org.apache.iceberg.rest.requests.PlanTableScanRequest;
 import org.apache.iceberg.rest.requests.RegisterTableRequest;
 import org.apache.iceberg.rest.requests.RenameTableRequest;
 import org.apache.iceberg.rest.requests.UpdateNamespacePropertiesRequest;
 import org.apache.iceberg.rest.requests.UpdateTableRequest;
 import org.apache.iceberg.rest.responses.CreateNamespaceResponse;
+import org.apache.iceberg.rest.responses.FetchPlanningResultResponse;
+import org.apache.iceberg.rest.responses.FetchScanTasksResponse;
 import org.apache.iceberg.rest.responses.GetNamespaceResponse;
 import org.apache.iceberg.rest.responses.ImmutableLoadViewResponse;
 import org.apache.iceberg.rest.responses.ListNamespacesResponse;
 import org.apache.iceberg.rest.responses.ListTablesResponse;
 import org.apache.iceberg.rest.responses.LoadTableResponse;
 import org.apache.iceberg.rest.responses.LoadViewResponse;
+import org.apache.iceberg.rest.responses.PlanTableScanResponse;
 import org.apache.iceberg.rest.responses.UpdateNamespacePropertiesResponse;
 import org.apache.iceberg.util.Pair;
 import org.apache.iceberg.util.Tasks;
@@ -86,6 +99,9 @@ import org.apache.iceberg.view.ViewRepresentation;
 public class CatalogHandlers {
   private static final Schema EMPTY_SCHEMA = new Schema();
   private static final String INITIAL_PAGE_TOKEN = "";
+  private static final InMemoryPlanningState IN_MEMORY_PLANNING_STATE =
+      InMemoryPlanningState.getInstance();
+  private static final ExecutorService ASYNC_PLANNING_POOL = 
Executors.newSingleThreadExecutor();
 
   private CatalogHandlers() {}
 
@@ -619,4 +635,148 @@ public class CatalogHandlers {
 
     return ops.current();
   }
+
+  public static PlanTableScanResponse planTableScan(
+      Catalog catalog,
+      TableIdentifier ident,
+      PlanTableScanRequest request,
+      Predicate<TableScan> shouldPlanAsync,
+      ToIntFunction<TableScan> tasksPerPlanTask) {
+    Table table = catalog.loadTable(ident);
+    TableScan tableScan = table.newScan();
+
+    if (request.snapshotId() != null) {
+      tableScan = tableScan.useSnapshot(request.snapshotId());
+    }
+    if (request.select() != null) {
+      tableScan = tableScan.select(request.select());
+    }
+    if (request.filter() != null) {
+      tableScan = tableScan.filter(request.filter());
+    }
+    if (request.statsFields() != null) {
+      tableScan = tableScan.includeColumnStats(request.statsFields());
+    }
+
+    tableScan = tableScan.caseSensitive(request.caseSensitive());
+
+    if (shouldPlanAsync.test(tableScan)) {
+      String asyncPlanId = UUID.randomUUID().toString();
+      asyncPlanFiles(tableScan, asyncPlanId, 
tasksPerPlanTask.applyAsInt(tableScan));
+      return PlanTableScanResponse.builder()
+          .withPlanId(asyncPlanId)
+          .withPlanStatus(PlanStatus.SUBMITTED)
+          .withSpecsById(table.specs())
+          .build();
+    }
+
+    String planId = UUID.randomUUID().toString();
+    planFilesFor(tableScan, planId, tasksPerPlanTask.applyAsInt(tableScan));
+    Pair<List<FileScanTask>, String> initial = 
IN_MEMORY_PLANNING_STATE.initialScanTasksFor(planId);
+    return PlanTableScanResponse.builder()
+        .withPlanStatus(PlanStatus.COMPLETED)
+        .withPlanTasks(IN_MEMORY_PLANNING_STATE.nextPlanTask(initial.second()))
+        .withFileScanTasks(initial.first())
+        .withDeleteFiles(
+            initial.first().stream()
+                .flatMap(task -> task.deletes().stream())
+                .distinct()
+                .collect(Collectors.toList()))
+        .withSpecsById(table.specs())
+        .build();
+  }
+
+  /**
+   * Fetches the planning result for an async plan.
+   *
+   * @param catalog the catalog to use for loading the table
+   * @param ident the table identifier
+   * @param planId the plan identifier
+   * @return the fetch planning result response
+   */
+  public static FetchPlanningResultResponse fetchPlanningResult(
+      Catalog catalog, TableIdentifier ident, String planId) {
+    Table table = catalog.loadTable(ident);
+    Pair<List<FileScanTask>, String> initial = 
IN_MEMORY_PLANNING_STATE.initialScanTasksFor(planId);
+    return FetchPlanningResultResponse.builder()
+        .withPlanStatus(PlanStatus.COMPLETED)
+        .withDeleteFiles(
+            initial.first().stream()
+                .flatMap(task -> task.deletes().stream())
+                .distinct()
+                .collect(Collectors.toList()))
+        .withFileScanTasks(initial.first())
+        .withPlanTasks(IN_MEMORY_PLANNING_STATE.nextPlanTask(initial.second()))
+        .withSpecsById(table.specs())
+        .build();
+  }
+
+  /**
+   * Fetches scan tasks for a specific plan task.
+   *
+   * @param catalog the catalog to use for loading the table
+   * @param ident the table identifier
+   * @param request the fetch scan tasks request
+   * @return the fetch scan tasks response
+   */
+  public static FetchScanTasksResponse fetchScanTasks(
+      Catalog catalog, TableIdentifier ident, FetchScanTasksRequest request) {
+    Table table = catalog.loadTable(ident);
+    String planTask = request.planTask();
+    List<FileScanTask> fileScanTasks = 
IN_MEMORY_PLANNING_STATE.fileScanTasksForPlanTask(planTask);
+
+    return FetchScanTasksResponse.builder()
+        .withFileScanTasks(fileScanTasks)
+        .withPlanTasks(IN_MEMORY_PLANNING_STATE.nextPlanTask(planTask))
+        .withSpecsById(table.specs())
+        .withDeleteFiles(
+            fileScanTasks.stream()
+                .flatMap(task -> task.deletes().stream())
+                .distinct()
+                .collect(Collectors.toList()))
+        .build();
+  }
+
  /**
   * Cancels a plan table scan by removing all associated state.
   *
   * @param planId the plan identifier to cancel
   */
  public static void cancelPlanTableScan(String planId) {
    // drops every plan task (and its file scan tasks) registered under this plan ID
    IN_MEMORY_PLANNING_STATE.removePlan(planId);
  }
+
  // Test-only hook: wipes all shared planning state and stops the async planning thread.
  // NOTE(review): ASYNC_PLANNING_POOL is a static final executor shared by everything in this
  // JVM; once shut down here, any later async planning submission would be rejected — confirm
  // this is only ever invoked at the very end of a test's lifecycle.
  static void clearPlanningState() {
    InMemoryPlanningState.getInstance().clear();
    ASYNC_PLANNING_POOL.shutdown();
  }
+
+  /**
+   * Plans file scan tasks for a table scan, grouping them into plan tasks for 
pagination.
+   *
+   * @param tableScan the table scan to plan
+   * @param planId the unique identifier for this plan
+   * @param tasksPerPlanTask number of file scan tasks to group per plan task
+   */
+  private static void planFilesFor(TableScan tableScan, String planId, int 
tasksPerPlanTask) {
+    Iterable<List<FileScanTask>> taskGroupings =
+        Iterables.partition(tableScan.planFiles(), tasksPerPlanTask);
+    int planTaskSequence = 0;
+    String previousPlanTask = null;
+    for (List<FileScanTask> taskGrouping : taskGroupings) {
+      String planTaskKey =
+          String.format("%s-%s-%s", planId, tableScan.table().uuid(), 
planTaskSequence++);
+      IN_MEMORY_PLANNING_STATE.addPlanTask(planTaskKey, taskGrouping);
+      if (previousPlanTask != null) {
+        IN_MEMORY_PLANNING_STATE.addNextPlanTask(previousPlanTask, 
planTaskKey);
+      }
+
+      previousPlanTask = planTaskKey;
+    }
+  }
+
  // Runs planFilesFor on the shared single-thread pool so planTableScan can return SUBMITTED
  // immediately; clients later poll fetchPlanningResult for the completed plan.
  private static void asyncPlanFiles(
      TableScan tableScan, String asyncPlanId, int tasksPerPlanTask) {
    ASYNC_PLANNING_POOL.execute(() -> planFilesFor(tableScan, asyncPlanId, tasksPerPlanTask));
  }
 }
diff --git 
a/core/src/main/java/org/apache/iceberg/rest/InMemoryPlanningState.java 
b/core/src/main/java/org/apache/iceberg/rest/InMemoryPlanningState.java
new file mode 100644
index 0000000000..2aed2f7cf5
--- /dev/null
+++ b/core/src/main/java/org/apache/iceberg/rest/InMemoryPlanningState.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.rest;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.iceberg.FileScanTask;
+import org.apache.iceberg.exceptions.NoSuchPlanIdException;
+import org.apache.iceberg.exceptions.NoSuchPlanTaskException;
+import org.apache.iceberg.relocated.com.google.common.base.Splitter;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.util.Pair;
+
+/**
+ * Encapsulates shared state in-memory for server-side scan planning.
+ *
+ * <p>This class maintains the state needed for pagination via plan tasks, 
mappings between plan
+ * tasks and file scan tasks, and an executor service for asynchronous 
planning.
+ */
+class InMemoryPlanningState {
+  private static volatile InMemoryPlanningState instance;
+  private static final String INITIAL_TASK_SEQUENCE_NUMBER = "0";
+
+  private final Map<String, List<FileScanTask>> planTaskToFileScanTasks;
+  private final Map<String, String> planTaskToNext;
+
+  private InMemoryPlanningState() {
+    this.planTaskToFileScanTasks = Maps.newConcurrentMap();
+    this.planTaskToNext = Maps.newConcurrentMap();
+  }
+
+  static InMemoryPlanningState getInstance() {
+    if (instance == null) {
+      synchronized (InMemoryPlanningState.class) {
+        if (instance == null) {
+          instance = new InMemoryPlanningState();
+        }
+      }
+    }
+    return instance;
+  }
+
+  void addPlanTask(String planTaskKey, List<FileScanTask> tasks) {
+    planTaskToFileScanTasks.put(planTaskKey, tasks);
+  }
+
+  void addNextPlanTask(String currentTask, String nextTask) {
+    planTaskToNext.put(currentTask, nextTask);
+  }
+
+  List<FileScanTask> fileScanTasksForPlanTask(String planTaskKey) {
+    List<FileScanTask> tasks = planTaskToFileScanTasks.get(planTaskKey);
+    if (tasks == null) {
+      throw new NoSuchPlanTaskException("Could not find tasks for plan task 
%s", planTaskKey);
+    }
+
+    return tasks;
+  }
+
+  List<String> nextPlanTask(String planTaskKey) {
+    String nextPlanTask = planTaskToNext.get(planTaskKey);
+    if (nextPlanTask != null) {
+      return ImmutableList.of(nextPlanTask);
+    }
+
+    return ImmutableList.of();
+  }
+
+  /**
+   * Retrieves the initial set of file scan tasks for a plan. PlanIDs are 
assumed to be separated
+   * with hyphens where the last component indicates the sequencing of plan 
IDs.
+   *
+   * @param planId the plan identifier
+   * @return pair of initial file scan tasks and the plan task key
+   * @throws NoSuchPlanIdException if the plan ID is not found
+   */
+  Pair<List<FileScanTask>, String> initialScanTasksFor(String planId) {
+    Set<Map.Entry<String, List<FileScanTask>>> matchingEntries =
+        planTaskToFileScanTasks.entrySet().stream()
+            .filter(
+                entry -> {
+                  String key = entry.getKey();
+                  if (!key.contains(planId)) {
+                    return false;
+                  }
+                  List<String> keyParts = Splitter.on("-").splitToList(key);
+                  if (keyParts.isEmpty()) {
+                    return false;
+                  }
+
+                  return 
INITIAL_TASK_SEQUENCE_NUMBER.equals(keyParts.get(keyParts.size() - 1));
+                })
+            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))
+            .entrySet();
+    if (matchingEntries.isEmpty()) {
+      throw new NoSuchPlanIdException("Could not find plan ID %s", planId);
+    }
+
+    Map.Entry<String, List<FileScanTask>> initialEntry = 
Iterables.getOnlyElement(matchingEntries);
+    return Pair.of(initialEntry.getValue(), initialEntry.getKey());
+  }
+
+  void removePlan(String planId) {
+    planTaskToNext.entrySet().removeIf(entry -> 
entry.getKey().contains(planId));
+    planTaskToFileScanTasks.entrySet().removeIf(entry -> 
entry.getKey().contains(planId));
+  }
+
+  void clear() {
+    planTaskToFileScanTasks.clear();
+    planTaskToNext.clear();
+  }
+}
diff --git a/core/src/test/java/org/apache/iceberg/rest/RESTCatalogAdapter.java 
b/core/src/test/java/org/apache/iceberg/rest/RESTCatalogAdapter.java
index 5c9fac6302..1a7a0e03d5 100644
--- a/core/src/test/java/org/apache/iceberg/rest/RESTCatalogAdapter.java
+++ b/core/src/test/java/org/apache/iceberg/rest/RESTCatalogAdapter.java
@@ -32,6 +32,7 @@ import org.apache.http.HttpHeaders;
 import org.apache.iceberg.BaseTable;
 import org.apache.iceberg.BaseTransaction;
 import org.apache.iceberg.Table;
+import org.apache.iceberg.TableScan;
 import org.apache.iceberg.Transaction;
 import org.apache.iceberg.Transactions;
 import org.apache.iceberg.catalog.Catalog;
@@ -46,6 +47,8 @@ import org.apache.iceberg.exceptions.ForbiddenException;
 import org.apache.iceberg.exceptions.NamespaceNotEmptyException;
 import org.apache.iceberg.exceptions.NoSuchIcebergTableException;
 import org.apache.iceberg.exceptions.NoSuchNamespaceException;
+import org.apache.iceberg.exceptions.NoSuchPlanIdException;
+import org.apache.iceberg.exceptions.NoSuchPlanTaskException;
 import org.apache.iceberg.exceptions.NoSuchTableException;
 import org.apache.iceberg.exceptions.NoSuchViewException;
 import org.apache.iceberg.exceptions.NotAuthorizedException;
@@ -61,6 +64,8 @@ import 
org.apache.iceberg.rest.requests.CommitTransactionRequest;
 import org.apache.iceberg.rest.requests.CreateNamespaceRequest;
 import org.apache.iceberg.rest.requests.CreateTableRequest;
 import org.apache.iceberg.rest.requests.CreateViewRequest;
+import org.apache.iceberg.rest.requests.FetchScanTasksRequest;
+import org.apache.iceberg.rest.requests.PlanTableScanRequest;
 import org.apache.iceberg.rest.requests.RegisterTableRequest;
 import org.apache.iceberg.rest.requests.RenameTableRequest;
 import org.apache.iceberg.rest.requests.ReportMetricsRequest;
@@ -91,6 +96,8 @@ public class RESTCatalogAdapter extends BaseHTTPClient {
           .put(CommitFailedException.class, 409)
           .put(UnprocessableEntityException.class, 422)
           .put(CommitStateUnknownException.class, 500)
+          .put(NoSuchPlanIdException.class, 404)
+          .put(NoSuchPlanTaskException.class, 404)
           .buildOrThrow();
 
   private final Catalog catalog;
@@ -98,6 +105,7 @@ public class RESTCatalogAdapter extends BaseHTTPClient {
   private final ViewCatalog asViewCatalog;
 
   private AuthSession authSession = AuthSession.EMPTY;
+  private final PlanningBehavior planningBehavior = planningBehavior();
 
   public RESTCatalogAdapter(Catalog catalog) {
     this.catalog = catalog;
@@ -294,6 +302,42 @@ public class RESTCatalogAdapter extends BaseHTTPClient {
           return castResponse(responseType, response);
         }
 
+      case PLAN_TABLE_SCAN:
+        {
+          TableIdentifier ident = tableIdentFromPathVars(vars);
+          PlanTableScanRequest request = 
castRequest(PlanTableScanRequest.class, body);
+          return castResponse(
+              responseType,
+              CatalogHandlers.planTableScan(
+                  catalog,
+                  ident,
+                  request,
+                  planningBehavior::shouldPlanTableScanAsync,
+                  scan -> planningBehavior.numberFileScanTasksPerPlanTask()));
+        }
+
+      case FETCH_PLANNING_RESULT:
+        {
+          TableIdentifier ident = tableIdentFromPathVars(vars);
+          String planId = planIDFromPathVars(vars);
+          return castResponse(
+              responseType, CatalogHandlers.fetchPlanningResult(catalog, 
ident, planId));
+        }
+
+      case FETCH_SCAN_TASKS:
+        {
+          TableIdentifier ident = tableIdentFromPathVars(vars);
+          FetchScanTasksRequest request = 
castRequest(FetchScanTasksRequest.class, body);
+          return castResponse(
+              responseType, CatalogHandlers.fetchScanTasks(catalog, ident, 
request));
+        }
+
+      case CANCEL_PLAN_TABLE_SCAN:
+        {
+          CatalogHandlers.cancelPlanTableScan(planIDFromPathVars(vars));
+          return null;
+        }
+
       case REGISTER_TABLE:
         {
           LoadTableResponse response =
@@ -533,11 +577,32 @@ public class RESTCatalogAdapter extends BaseHTTPClient {
     throw new RESTException("Unhandled error: %s", error);
   }
 
  /**
   * Supplied interface to allow RESTCatalogAdapter implementations to have a mechanism to change
   * how many file scan tasks get grouped in a plan task or under what conditions a table scan
   * should be performed async. Primarily used in testing to allow overriding more deterministic
   * ways of planning behavior.
   */
  public interface PlanningBehavior {
    /** Number of file scan tasks to group into each plan task page (defaults to 100). */
    default int numberFileScanTasksPerPlanTask() {
      return 100;
    }

    /**
     * Whether planning for the given scan should run asynchronously; defaults to synchronous
     * planning for every scan.
     */
    default boolean shouldPlanTableScanAsync(TableScan tableScan) {
      return false;
    }
  }
+
  /**
   * Factory for the planning behavior used by this adapter; subclasses override this to customize
   * plan task sizing or force async planning in tests. Returns the all-defaults behavior.
   */
  protected PlanningBehavior planningBehavior() {
    return new PlanningBehavior() {};
  }
+
  @Override
  public void close() throws IOException {
    // The calling test is responsible for closing the underlying catalog backing this REST catalog
    // so that the underlying backend catalog is not closed and reopened during the REST catalog's
    // initialize method when fetching the server configuration.
    // NOTE(review): clearPlanningState clears JVM-wide planning state and shuts down the shared
    // static async planning pool, so closing one adapter affects any other adapter in the same
    // JVM — confirm adapters are never used concurrently across tests.
    CatalogHandlers.clearPlanningState();
  }
 
   private static class BadResponseType extends RuntimeException {
@@ -592,6 +657,10 @@ public class RESTCatalogAdapter extends BaseHTTPClient {
         namespaceFromPathVars(pathVars), 
RESTUtil.decodeString(pathVars.get("view")));
   }
 
  // Extracts and URL-decodes the "plan-id" path variable from the route's path vars
  private static String planIDFromPathVars(Map<String, String> pathVars) {
    return RESTUtil.decodeString(pathVars.get("plan-id"));
  }
+
   private static SnapshotMode snapshotModeFromQueryParams(Map<String, String> 
queryParams) {
     return SnapshotMode.valueOf(
         queryParams
diff --git a/core/src/test/java/org/apache/iceberg/rest/Route.java 
b/core/src/test/java/org/apache/iceberg/rest/Route.java
index 44c7312f3f..eedb2615ad 100644
--- a/core/src/test/java/org/apache/iceberg/rest/Route.java
+++ b/core/src/test/java/org/apache/iceberg/rest/Route.java
@@ -26,6 +26,8 @@ import 
org.apache.iceberg.rest.requests.CommitTransactionRequest;
 import org.apache.iceberg.rest.requests.CreateNamespaceRequest;
 import org.apache.iceberg.rest.requests.CreateTableRequest;
 import org.apache.iceberg.rest.requests.CreateViewRequest;
+import org.apache.iceberg.rest.requests.FetchScanTasksRequest;
+import org.apache.iceberg.rest.requests.PlanTableScanRequest;
 import org.apache.iceberg.rest.requests.RegisterTableRequest;
 import org.apache.iceberg.rest.requests.RenameTableRequest;
 import org.apache.iceberg.rest.requests.ReportMetricsRequest;
@@ -33,12 +35,15 @@ import 
org.apache.iceberg.rest.requests.UpdateNamespacePropertiesRequest;
 import org.apache.iceberg.rest.requests.UpdateTableRequest;
 import org.apache.iceberg.rest.responses.ConfigResponse;
 import org.apache.iceberg.rest.responses.CreateNamespaceResponse;
+import org.apache.iceberg.rest.responses.FetchPlanningResultResponse;
+import org.apache.iceberg.rest.responses.FetchScanTasksResponse;
 import org.apache.iceberg.rest.responses.GetNamespaceResponse;
 import org.apache.iceberg.rest.responses.ListNamespacesResponse;
 import org.apache.iceberg.rest.responses.ListTablesResponse;
 import org.apache.iceberg.rest.responses.LoadTableResponse;
 import org.apache.iceberg.rest.responses.LoadViewResponse;
 import org.apache.iceberg.rest.responses.OAuthTokenResponse;
+import org.apache.iceberg.rest.responses.PlanTableScanResponse;
 import org.apache.iceberg.rest.responses.UpdateNamespacePropertiesResponse;
 import org.apache.iceberg.util.Pair;
 
@@ -109,7 +114,24 @@ enum Route {
       LoadViewResponse.class),
   RENAME_VIEW(
       HTTPRequest.HTTPMethod.POST, ResourcePaths.V1_VIEW_RENAME, 
RenameTableRequest.class, null),
-  DROP_VIEW(HTTPRequest.HTTPMethod.DELETE, ResourcePaths.V1_VIEW);
+  DROP_VIEW(HTTPRequest.HTTPMethod.DELETE, ResourcePaths.V1_VIEW),
+  PLAN_TABLE_SCAN(
+      HTTPRequest.HTTPMethod.POST,
+      ResourcePaths.V1_TABLE_SCAN_PLAN_SUBMIT,
+      PlanTableScanRequest.class,
+      PlanTableScanResponse.class),
+  FETCH_PLANNING_RESULT(
+      HTTPRequest.HTTPMethod.GET,
+      ResourcePaths.V1_TABLE_SCAN_PLAN,
+      null,
+      FetchPlanningResultResponse.class),
+  FETCH_SCAN_TASKS(
+      HTTPRequest.HTTPMethod.POST,
+      ResourcePaths.V1_TABLE_SCAN_PLAN_TASKS,
+      FetchScanTasksRequest.class,
+      FetchScanTasksResponse.class),
+  CANCEL_PLAN_TABLE_SCAN(
+      HTTPRequest.HTTPMethod.DELETE, ResourcePaths.V1_TABLE_SCAN_PLAN, null, 
null);
 
   private final HTTPRequest.HTTPMethod method;
   private final int requiredLength;

Reply via email to