This is an automated email from the ASF dual-hosted git repository.

sumitagrawal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 9074b8df0e HDDS-10608. Recon can't get full key when using Recon API. (#6492)
9074b8df0e is described below

commit 9074b8df0e2943bd18e13c6ddbdbc4fc6c41b34c
Author: Arafat2198 <[email protected]>
AuthorDate: Thu May 9 11:08:59 2024 +0530

    HDDS-10608. Recon can't get full key when using Recon API. (#6492)
---
 .../ozone/recon/TestReconContainerEndpoint.java    | 222 +++++++++++++++++++++
 .../org/apache/hadoop/ozone/recon/ReconUtils.java  | 113 ++++++++++-
 .../hadoop/ozone/recon/api/ContainerEndpoint.java  |  51 +++--
 .../recon/api/handlers/DirectoryEntityHandler.java |   1 -
 .../hadoop/ozone/recon/api/types/KeyMetadata.java  |  11 +
 .../hadoop/ozone/recon/api/types/KeysResponse.java |   9 +-
 .../hadoop/ozone/recon/api/types/NSSummary.java    |  15 +-
 .../hadoop/ozone/recon/codec/NSSummaryCodec.java   |  17 +-
 .../recon/spi/ReconNamespaceSummaryManager.java    |   3 +
 .../spi/impl/ReconNamespaceSummaryManagerImpl.java |  12 +-
 .../recon/tasks/NSSummaryTaskDbEventHandler.java   |   2 +
 .../recon/api/TestNSSummaryEndpointWithFSO.java    | 166 ++++++++++++++-
 .../recon/api/TestNSSummaryEndpointWithLegacy.java |  48 ++++-
 .../api/TestNSSummaryEndpointWithOBSAndLegacy.java |  76 +++++--
 .../impl/TestReconNamespaceSummaryManagerImpl.java |   6 +-
 .../recon/tasks/TestNSSummaryTaskWithFSO.java      |  54 ++++-
 16 files changed, 748 insertions(+), 58 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java
new file mode 100644
index 0000000000..8c334780d9
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.BucketArgs;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.recon.api.ContainerEndpoint;
+import org.apache.hadoop.ozone.recon.api.types.KeyMetadata;
+import org.apache.hadoop.ozone.recon.api.types.KeysResponse;
+import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
+import java.nio.charset.StandardCharsets;
+import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+import java.util.Collection;
+
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * Integration test for the Recon container endpoint.
+ */
+public class TestReconContainerEndpoint {
+
+  private OzoneConfiguration conf;
+  private MiniOzoneCluster cluster;
+  private OzoneClient client;
+  private ObjectStore store;
+
+  @BeforeEach
+  public void init() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
+        OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED);
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(3)
+        .includeRecon(true)
+        .build();
+    cluster.waitForClusterToBeReady();
+    client = cluster.newClient();
+    store = client.getObjectStore();
+  }
+
+  @AfterEach
+  public void shutdown() throws IOException {
+    if (client != null) {
+      client.close();
+    }
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testContainerEndpointForFSOLayout() throws Exception {
+    // Setup: Create multiple volumes, buckets, and key hierarchies
+    String volName = "testvol";
+    String bucketName = "fsobucket";
+    // Scenario 1: Deeply nested directories
+    String nestedDirKey = "dir1/dir2/dir3/file1";
+    // Scenario 2: Single file in a bucket
+    String singleFileKey = "file1";
+
+    // Create volume and bucket
+    store.createVolume(volName);
+    OzoneVolume volume = store.getVolume(volName);
+    volume.createBucket(bucketName, BucketArgs.newBuilder()
+        .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED).build());
+
+    // Write keys to the bucket
+    writeTestData(volName, bucketName, nestedDirKey, "data1");
+    writeTestData(volName, bucketName, singleFileKey, "data2");
+
+    // Synchronize data from OM to Recon
+    OzoneManagerServiceProviderImpl impl = (OzoneManagerServiceProviderImpl)
+        cluster.getReconServer().getOzoneManagerServiceProvider();
+    impl.syncDataFromOM();
+
+    // Search for the bucket in the bucket table and verify it is FSO.
+    OmBucketInfo bucketInfo = cluster.getOzoneManager().getBucketInfo(volName, bucketName);
+    assertNotNull(bucketInfo);
+    assertEquals(BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        bucketInfo.getBucketLayout());
+
+    // Assuming a known container ID that these keys have been written into
+    long testContainerID = 1L;
+
+    // Query the ContainerEndpoint for the keys in the specified container
+    Response response = getContainerEndpointResponse(testContainerID);
+
+    assertNotNull(response, "Response should not be null.");
+    assertEquals(Response.Status.OK.getStatusCode(), response.getStatus(),
+        "Expected HTTP 200 OK response.");
+
+    KeysResponse data = (KeysResponse) response.getEntity();
+    Collection<KeyMetadata> keyMetadataList = data.getKeys();
+
+    assertEquals(1, data.getTotalCount());
+    assertEquals(1, keyMetadataList.size());
+
+    // Assert the file name and the complete path.
+    KeyMetadata keyMetadata = keyMetadataList.iterator().next();
+    assertEquals("file1", keyMetadata.getKey());
+    assertEquals("testvol/fsobucket/dir1/dir2/dir3/file1", 
keyMetadata.getCompletePath());
+
+    testContainerID = 2L;
+    response = getContainerEndpointResponse(testContainerID);
+    data = (KeysResponse) response.getEntity();
+    keyMetadataList = data.getKeys();
+    assertEquals(1, data.getTotalCount());
+    assertEquals(1, keyMetadataList.size());
+
+    // Assert the file name and the complete path.
+    keyMetadata = keyMetadataList.iterator().next();
+    assertEquals("file1", keyMetadata.getKey());
+    assertEquals("testvol/fsobucket/file1", keyMetadata.getCompletePath());
+  }
+
+  @Test
+  public void testContainerEndpointForOBSBucket() throws Exception {
+    String volumeName = "testvol2";
+    String obsBucketName = "obsbucket";
+    String obsSingleFileKey = "file1";
+
+    // Setup volume and OBS bucket
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(obsBucketName,
+        BucketArgs.newBuilder().setBucketLayout(BucketLayout.OBJECT_STORE)
+            .build());
+
+    // Write a single file to the OBS bucket
+    writeTestData(volumeName, obsBucketName, obsSingleFileKey, "Hello OBS!");
+
+    OzoneManagerServiceProviderImpl impl =
+        (OzoneManagerServiceProviderImpl) cluster.getReconServer()
+            .getOzoneManagerServiceProvider();
+    impl.syncDataFromOM();
+
+    // Search for the bucket in the bucket table and verify it is OBS.
+    OmBucketInfo bucketInfo = cluster.getOzoneManager().getBucketInfo(volumeName, obsBucketName);
+    assertNotNull(bucketInfo);
+    assertEquals(BucketLayout.OBJECT_STORE, bucketInfo.getBucketLayout());
+
+    // Initialize the ContainerEndpoint
+    long containerId = 1L;
+    Response response = getContainerEndpointResponse(containerId);
+
+    assertNotNull(response, "Response should not be null.");
+    assertEquals(Response.Status.OK.getStatusCode(), response.getStatus(),
+        "Expected HTTP 200 OK response.");
+    KeysResponse data = (KeysResponse) response.getEntity();
+    Collection<KeyMetadata> keyMetadataList = data.getKeys();
+
+    assertEquals(1, data.getTotalCount());
+    assertEquals(1, keyMetadataList.size());
+
+    KeyMetadata keyMetadata = keyMetadataList.iterator().next();
+    assertEquals("file1", keyMetadata.getKey());
+    assertEquals("testvol2/obsbucket/file1", keyMetadata.getCompletePath());
+  }
+
+  private Response getContainerEndpointResponse(long containerId) {
+    OzoneStorageContainerManager reconSCM =
+        cluster.getReconServer().getReconStorageContainerManager();
+    ReconContainerManager reconContainerManager =
+        (ReconContainerManager) reconSCM.getContainerManager();
+    ContainerHealthSchemaManager containerHealthSchemaManager =
+        reconContainerManager.getContainerSchemaManager();
+    ReconOMMetadataManager omMetadataManagerInstance =
+        (ReconOMMetadataManager)
+            cluster.getReconServer().getOzoneManagerServiceProvider()
+                .getOMMetadataManagerInstance();
+    ContainerEndpoint containerEndpoint =
+        new ContainerEndpoint(reconSCM, containerHealthSchemaManager,
+            cluster.getReconServer().getReconNamespaceSummaryManager(),
+            cluster.getReconServer().getReconContainerMetadataManager(),
+            omMetadataManagerInstance);
+    return containerEndpoint.getKeysForContainer(containerId, 10, "");
+  }
+
+  private void writeTestData(String volumeName, String bucketName,
+                             String keyPath, String data) throws Exception {
+    try (OzoneOutputStream out = client.getObjectStore().getVolume(volumeName)
+        .getBucket(bucketName)
+        .createKey(keyPath, data.length())) {
+      out.write(data.getBytes(StandardCharsets.UTF_8));
+    }
+  }
+
+}
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index f154f024fb..76b601b1c0 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -32,6 +32,9 @@ import java.sql.Timestamp;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 
 import com.google.common.base.Preconditions;
@@ -55,17 +58,24 @@ import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_CONTAINER
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_THREAD_POOL_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig;
 import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static 
org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR;
 import static org.jooq.impl.DSL.currentTimestamp;
 import static org.jooq.impl.DSL.select;
 import static org.jooq.impl.DSL.using;
 
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.recon.api.types.NSSummary;
 import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
 import org.apache.hadoop.ozone.recon.scm.ReconContainerReportQueue;
+import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
 import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
 import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats;
 import jakarta.annotation.Nonnull;
+import com.google.common.annotations.VisibleForTesting;
 import org.jooq.Configuration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -81,9 +91,11 @@ public class ReconUtils {
   public ReconUtils() {
   }
 
-  private static final Logger LOG = LoggerFactory.getLogger(
+  private static Logger log = LoggerFactory.getLogger(
       ReconUtils.class);
 
+  private static AtomicBoolean rebuildTriggered = new AtomicBoolean(false);
+
   public static File getReconScmDbDir(ConfigurationSource conf) {
     return new ReconUtils().getReconDbDir(conf, OZONE_RECON_SCM_DB_DIR);
   }
@@ -123,7 +135,7 @@ public class ReconUtils {
       return metadataDir;
     }
 
-    LOG.warn("{} is not configured. We recommend adding this setting. " +
+    log.warn("{} is not configured. We recommend adding this setting. " +
             "Falling back to {} instead.",
         dirConfigKey, HddsConfigKeys.OZONE_METADATA_DIRS);
     return getOzoneMetaDirPath(conf);
@@ -158,7 +170,7 @@ public class ReconUtils {
         org.apache.hadoop.io.IOUtils.closeStream(tarOs);
         org.apache.hadoop.io.IOUtils.closeStream(fileOutputStream);
       } catch (Exception e) {
-        LOG.error("Exception encountered when closing " +
+        log.error("Exception encountered when closing " +
             "TAR file output stream: " + e);
       }
     }
@@ -223,7 +235,7 @@ public class ReconUtils {
           if (entry.isDirectory()) {
             boolean success = f.mkdirs();
             if (!success) {
-              LOG.error("Unable to create directory found in tar.");
+              log.error("Unable to create directory found in tar.");
             }
           } else {
             //Write contents of file in archive to a new file.
@@ -246,25 +258,103 @@ public class ReconUtils {
     }
   }
 
+
+  /**
+   * Constructs the full path of a key from its OmKeyInfo using a bottom-up approach, starting from the leaf node.
+   *
+   * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched
+   * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from
+   * bottom to top, finally prepending the volume and bucket names to complete the full path. If the directory structure
+   * is currently being rebuilt (indicated by the rebuildTriggered flag), this method returns an empty string to signify
+   * that path construction is temporarily unavailable.
+   *
+   * @param omKeyInfo The OmKeyInfo object for the key
+   * @return The constructed full path of the key as a String, or an empty string if a rebuild is in progress and
+   *         the path cannot be constructed at this time.
+   * @throws IOException
+   */
+  public static String constructFullPath(OmKeyInfo omKeyInfo,
+                                         ReconNamespaceSummaryManager reconNamespaceSummaryManager,
+                                         ReconOMMetadataManager omMetadataManager)
+      throws IOException {
+
+    StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName());
+    long parentId = omKeyInfo.getParentObjectID();
+    boolean isDirectoryPresent = false;
+
+    while (parentId != 0) {
+      NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(parentId);
+      if (nsSummary == null) {
+        log.warn("NSSummary tree is currently being rebuilt or the directory 
could be in the progress of " +
+            "deletion, returning empty string for path construction.");
+        return "";
+      }
+      if (nsSummary.getParentId() == -1) {
+        if (rebuildTriggered.compareAndSet(false, true)) {
+          triggerRebuild(reconNamespaceSummaryManager, omMetadataManager);
+        }
+        log.warn("NSSummary tree is currently being rebuilt, returning empty 
string for path construction.");
+        return "";
+      }
+      fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX);
+
+      // Move to the parent ID of the current directory
+      parentId = nsSummary.getParentId();
+      isDirectoryPresent = true;
+    }
+
+    // Prepend the volume and bucket to the constructed path
+    String volumeName = omKeyInfo.getVolumeName();
+    String bucketName = omKeyInfo.getBucketName();
+    fullPath.insert(0, volumeName + OM_KEY_PREFIX + bucketName + OM_KEY_PREFIX);
+    if (isDirectoryPresent) {
+      return OmUtils.normalizeKey(fullPath.toString(), true);
+    }
+    return fullPath.toString();
+  }
+
+  private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSummaryManager,
+                                     ReconOMMetadataManager omMetadataManager) {
+    ExecutorService executor = Executors.newSingleThreadExecutor(r -> {
+      Thread t = new Thread(r);
+      t.setName("RebuildNSSummaryThread");
+      return t;
+    });
+
+    executor.submit(() -> {
+      long startTime = System.currentTimeMillis();
+      log.info("Rebuilding NSSummary tree...");
+      try {
+        reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager);
+      } finally {
+        long endTime = System.currentTimeMillis();
+        log.info("NSSummary tree rebuild completed in {} ms.", endTime - 
startTime);
+      }
+    });
+    executor.shutdown();
+  }
+
   /**
    * Make HTTP GET call on the URL and return HttpURLConnection instance.
+   *
    * @param connectionFactory URLConnectionFactory to use.
-   * @param url url to call
-   * @param isSpnego is SPNEGO enabled
+   * @param url               url to call
+   * @param isSpnego          is SPNEGO enabled
    * @return HttpURLConnection instance of the HTTP call.
    * @throws IOException, AuthenticationException While reading the response.
    */
   public HttpURLConnection makeHttpCall(URLConnectionFactory connectionFactory,
-                                  String url, boolean isSpnego)
+                                        String url, boolean isSpnego)
       throws IOException, AuthenticationException {
     HttpURLConnection urlConnection = (HttpURLConnection)
-          connectionFactory.openConnection(new URL(url), isSpnego);
+        connectionFactory.openConnection(new URL(url), isSpnego);
     urlConnection.connect();
     return urlConnection;
   }
 
   /**
    * Load last known DB in Recon.
+   *
    * @param reconDbDir
    * @param fileNamePrefix
    * @return
@@ -289,7 +379,7 @@ public class ReconUtils {
               lastKnownSnapshotFileName = fileName;
             }
           } catch (NumberFormatException nfEx) {
-            LOG.warn("Unknown file found in Recon DB dir : {}", fileName);
+            log.warn("Unknown file found in Recon DB dir : {}", fileName);
           }
         }
       }
@@ -414,4 +504,9 @@ public class ReconUtils {
         HddsServerUtil.getReconDataNodeBindAddress(conf));
     return builder.build();
   }
+
+  @VisibleForTesting
+  public static void setLogger(Logger logger) {
+    log = logger;
+  }
 }
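
For orientation, a rough usage sketch of the new ReconUtils.constructFullPath helper added above; the key name, volume/bucket names and parent object ID below are made up, and the two manager instances are assumed to be whatever Recon already has wired up (as in the new integration test):

    // Illustrative only; values are hypothetical, not taken from this commit.
    OmKeyInfo keyInfo = new OmKeyInfo.Builder()
        .setVolumeName("vol1")
        .setBucketName("fsobucket")
        .setKeyName("file1")                    // FSO keys store only the leaf name
        .setParentObjectID(dirObjectId)         // assumed objectID of the parent directory
        .build();

    // Walks parentIds bottom-up through NSSummary entries, then prepends "vol1/fsobucket/",
    // e.g. "vol1/fsobucket/dir1/dir2/file1"; returns "" while the NSSummary tree is rebuilding.
    String fullPath = ReconUtils.constructFullPath(keyInfo,
        reconNamespaceSummaryManager, reconOMMetadataManager);
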
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
index afc9c8a323..86ef6c022d 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.apache.hadoop.ozone.recon.api.types.ContainerDiscrepancyInfo;
 import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
 import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
@@ -94,10 +95,7 @@ import static 
org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY;
 @AdminOnly
 public class ContainerEndpoint {
 
-  @Inject
   private ReconContainerMetadataManager reconContainerMetadataManager;
-
-  @Inject
   private ReconOMMetadataManager omMetadataManager;
 
   private final ReconContainerManager containerManager;
@@ -144,33 +142,38 @@ public class ContainerEndpoint {
 
   @Inject
   public ContainerEndpoint(OzoneStorageContainerManager reconSCM,
-               ContainerHealthSchemaManager containerHealthSchemaManager,
-               ReconNamespaceSummaryManager reconNamespaceSummaryManager) {
+                           ContainerHealthSchemaManager containerHealthSchemaManager,
+                           ReconNamespaceSummaryManager reconNamespaceSummaryManager,
+                           ReconContainerMetadataManager reconContainerMetadataManager,
+                           ReconOMMetadataManager omMetadataManager) {
     this.containerManager =
         (ReconContainerManager) reconSCM.getContainerManager();
     this.pipelineManager = reconSCM.getPipelineManager();
     this.containerHealthSchemaManager = containerHealthSchemaManager;
     this.reconNamespaceSummaryManager = reconNamespaceSummaryManager;
     this.reconSCM = reconSCM;
+    this.reconContainerMetadataManager = reconContainerMetadataManager;
+    this.omMetadataManager = omMetadataManager;
   }
 
   /**
    * Return @{@link org.apache.hadoop.hdds.scm.container}
    * for the containers starting from the given "prev-key" query param for the
    * given "limit". The given "prev-key" is skipped from the results returned.
+   *
    * @param prevKey the containerID after which results are returned.
    *                start containerID, >=0,
    *                start searching at the head if 0.
-   * @param limit max no. of containers to get.
-   *              count must be >= 0
-   *              Usually the count will be replace with a very big
-   *              value instead of being unlimited in case the db is very big.
+   * @param limit   max no. of containers to get.
+   *                count must be >= 0
+   *                Usually the count will be replace with a very big
+   *                value instead of being unlimited in case the db is very big.
    * @return {@link Response}
    */
   @GET
   public Response getContainers(
       @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
-          int limit,
+      int limit,
       @DefaultValue(PREV_CONTAINER_ID_DEFAULT_VALUE)
       @QueryParam(RECON_QUERY_PREVKEY) long prevKey) {
     if (limit < 0 || prevKey < 0) {
@@ -212,8 +215,8 @@ public class ContainerEndpoint {
    * starting from the given "prev-key" query param for the given "limit".
    * The given prevKeyPrefix is skipped from the results returned.
    *
-   * @param containerID the given containerID.
-   * @param limit max no. of keys to get.
+   * @param containerID   the given containerID.
+   * @param limit         max no. of keys to get.
    * @param prevKeyPrefix the key prefix after which results are returned.
    * @return {@link Response}
    */
@@ -226,7 +229,12 @@ public class ContainerEndpoint {
       @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY)
           String prevKeyPrefix) {
     Map<String, KeyMetadata> keyMetadataMap = new LinkedHashMap<>();
+
+    // Total count of keys in the container.
     long totalCount;
+    // Last key prefix to be used for pagination. It will be exposed in the response.
+    String lastKey = "";
+
     try {
       Map<ContainerKeyPrefix, Integer> containerKeyPrefixMap =
           reconContainerMetadataManager.getKeyPrefixesForContainer(containerID,
@@ -263,6 +271,7 @@ public class ContainerEndpoint {
               omKeyInfo.getVolumeName(),
               omKeyInfo.getBucketName(),
               omKeyInfo.getKeyName());
+          lastKey = ozoneKey;
           if (keyMetadataMap.containsKey(ozoneKey)) {
             keyMetadataMap.get(ozoneKey).getVersions()
                 .add(containerKeyPrefix.getKeyVersion());
@@ -278,6 +287,8 @@ public class ContainerEndpoint {
             keyMetadata.setBucket(omKeyInfo.getBucketName());
             keyMetadata.setVolume(omKeyInfo.getVolumeName());
             keyMetadata.setKey(omKeyInfo.getKeyName());
+            keyMetadata.setCompletePath(ReconUtils.constructFullPath(omKeyInfo,
+                reconNamespaceSummaryManager, omMetadataManager));
             keyMetadata.setCreationTime(
                 Instant.ofEpochMilli(omKeyInfo.getCreationTime()));
             keyMetadata.setModificationTime(
@@ -298,7 +309,7 @@ public class ContainerEndpoint {
           Response.Status.INTERNAL_SERVER_ERROR);
     }
     KeysResponse keysResponse =
-        new KeysResponse(totalCount, keyMetadataMap.values());
+        new KeysResponse(totalCount, keyMetadataMap.values(), lastKey);
     return Response.ok(keysResponse).build();
   }
 
@@ -334,7 +345,7 @@ public class ContainerEndpoint {
   ) {
     List<MissingContainerMetadata> missingContainers = new ArrayList<>();
     containerHealthSchemaManager.getUnhealthyContainers(
-        UnHealthyContainerStates.MISSING, 0, limit)
+            UnHealthyContainerStates.MISSING, 0, limit)
         .forEach(container -> {
           long containerID = container.getContainerId();
           try {
@@ -378,7 +389,7 @@ public class ContainerEndpoint {
   public Response getUnhealthyContainers(
       @PathParam("state") String state,
       @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
-          int limit,
+      int limit,
       @DefaultValue(DEFAULT_BATCH_NUMBER)
       @QueryParam(RECON_QUERY_BATCH_PARAM) int batchNum) {
     int offset = Math.max(((batchNum - 1) * limit), 0);
@@ -399,7 +410,8 @@ public class ContainerEndpoint {
           .getUnhealthyContainers(internalState, offset, limit);
       List<UnhealthyContainers> emptyMissingFiltered = containers.stream()
           .filter(
-              container -> 
!container.getContainerState().equals(UnHealthyContainerStates.EMPTY_MISSING.toString()))
+              container -> !container.getContainerState()
+                  .equals(UnHealthyContainerStates.EMPTY_MISSING.toString()))
           .collect(
               Collectors.toList());
       for (UnhealthyContainers c : emptyMissingFiltered) {
@@ -433,7 +445,6 @@ public class ContainerEndpoint {
    * Return
    * {@link org.apache.hadoop.ozone.recon.api.types.UnhealthyContainerMetadata}
    * for all unhealthy containers.
-
    * @param limit The limit of unhealthy containers to return.
    * @param batchNum The batch number (like "page number") of results to 
return.
    *                 Passing 1, will return records 1 to limit. 2 will return
@@ -444,7 +455,7 @@ public class ContainerEndpoint {
   @Path("/unhealthy")
   public Response getUnhealthyContainers(
       @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
-          int limit,
+      int limit,
       @DefaultValue(DEFAULT_BATCH_NUMBER)
       @QueryParam(RECON_QUERY_BATCH_PARAM) int batchNum) {
     return getUnhealthyContainers(null, limit, batchNum);
@@ -519,6 +530,7 @@ public class ContainerEndpoint {
   /**
    * Helper function to extract the blocks for a given container from a given
    * OM Key.
+   *
    * @param matchedKeys List of OM Key Info locations
    * @param containerID containerId.
    * @return List of blocks.
@@ -703,7 +715,8 @@ public class ContainerEndpoint {
   }
 
 
-  /** This API retrieves set of deleted containers in SCM which are present
+  /**
+   * This API retrieves set of deleted containers in SCM which are present
    * in OM to find out list of keys mapped to such DELETED state containers.
    *
    * limit - limits the number of such SCM DELETED containers present in OM.
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
index ae7181af70..b535943081 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
@@ -165,7 +165,6 @@ public class DirectoryEntityHandler extends EntityHandler {
     }
 
     duResponse.setDuData(subdirDUData);
-
     return duResponse;
   }
 
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java
index c48e21d90f..5094f47c24 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java
@@ -45,6 +45,9 @@ public class KeyMetadata {
   @XmlElement(name = "Key")
   private String key;
 
+  @XmlElement(name = "CompletePath")
+  private String completePath;
+
   @XmlElement(name = "DataSize")
   private long dataSize;
 
@@ -126,6 +129,14 @@ public class KeyMetadata {
     this.blockIds = blockIds;
   }
 
+  public String getCompletePath() {
+    return completePath;
+  }
+
+  public void setCompletePath(String completePath) {
+    this.completePath = completePath;
+  }
+
   /**
    * Class to hold ContainerID and BlockID.
    */
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java
index 5b05975623..c09d28718e 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java
@@ -36,9 +36,13 @@ public class KeysResponse {
   @JsonProperty("keys")
   private Collection<KeyMetadata> keys;
 
-  public KeysResponse(long totalCount, Collection<KeyMetadata> keys) {
+  @JsonProperty("lastKey")
+  private String lastKey;
+
+  public KeysResponse(long totalCount, Collection<KeyMetadata> keys, String lastKey) {
     this.totalCount = totalCount;
     this.keys = keys;
+    this.lastKey = lastKey;
   }
 
   public long getTotalCount() {
@@ -48,4 +52,7 @@ public class KeysResponse {
   public Collection<KeyMetadata> getKeys() {
     return keys;
   }
+  public String getLastKey() {
+    return lastKey;
+  }
 }
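
As a side note, a rough sketch of how a caller might page through a container's keys with the new lastKey field; the containerEndpoint and containerId variables and the page size of 10 are assumptions for illustration (mirroring the new integration test), not part of this change:

    // Hypothetical pagination loop over the container keys endpoint.
    String prevKey = "";
    while (true) {
      Response response = containerEndpoint.getKeysForContainer(containerId, 10, prevKey);
      KeysResponse page = (KeysResponse) response.getEntity();
      if (page.getKeys().isEmpty()) {
        break;                        // no more keys for this container
      }
      page.getKeys().forEach(k -> System.out.println(k.getCompletePath()));
      prevKey = page.getLastKey();    // feed lastKey back as the next prev-key prefix
    }
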
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
index c0f93aebe9..0f774f01bf 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
@@ -36,22 +36,25 @@ public class NSSummary {
   private int[] fileSizeBucket;
   private Set<Long> childDir;
   private String dirName;
+  private long parentId = 0;
 
   public NSSummary() {
     this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS],
-         new HashSet<>(), "");
+        new HashSet<>(), "", 0);
   }
 
   public NSSummary(int numOfFiles,
                    long sizeOfFiles,
                    int[] bucket,
                    Set<Long> childDir,
-                   String dirName) {
+                   String dirName,
+                   long parentId) {
     this.numOfFiles = numOfFiles;
     this.sizeOfFiles = sizeOfFiles;
     setFileSizeBucket(bucket);
     this.childDir = childDir;
     this.dirName = dirName;
+    this.parentId = parentId;
   }
 
   public int getNumOfFiles() {
@@ -107,4 +110,12 @@ public class NSSummary {
       this.childDir.remove(childId);
     }
   }
+
+  public long getParentId() {
+    return parentId;
+  }
+
+  public void setParentId(long parentId) {
+    this.parentId = parentId;
+  }
 }
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
index 09e0b25879..f3b273451a 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
@@ -65,9 +65,10 @@ public final class NSSummaryCodec implements 
Codec<NSSummary> {
     int stringLen = dirName.getBytes(StandardCharsets.UTF_8).length;
     int numOfChildDirs = childDirs.size();
     final int resSize = NUM_OF_INTS * Integer.BYTES
-        + (numOfChildDirs + 1) * Long.BYTES // 1 long field + list size
+        + (numOfChildDirs + 1) * Long.BYTES // 1 long field for parentId + list size
         + Short.BYTES // 2 dummy shorts to track length
-        + stringLen; // directory name length
+        + stringLen // directory name length
+        + Long.BYTES; // Added space for parentId serialization
 
     ByteArrayOutputStream out = new ByteArrayOutputStream(resSize);
     out.write(integerCodec.toPersistedFormat(object.getNumOfFiles()));
@@ -84,6 +85,8 @@ public final class NSSummaryCodec implements Codec<NSSummary> 
{
     }
     out.write(integerCodec.toPersistedFormat(stringLen));
     out.write(stringCodec.toPersistedFormat(dirName));
+    out.write(longCodec.toPersistedFormat(object.getParentId()));
+
     return out.toByteArray();
   }
 
@@ -117,6 +120,15 @@ public final class NSSummaryCodec implements 
Codec<NSSummary> {
     assert (bytesRead == strLen);
     String dirName = stringCodec.fromPersistedFormat(buffer);
     res.setDirName(dirName);
+
+    // Check if there is enough data available to read the parentId
+    if (in.available() >= Long.BYTES) {
+      long parentId = in.readLong();
+      res.setParentId(parentId);
+    } else {
+      // Set default parentId to -1 indicating it's from old format
+      res.setParentId(-1);
+    }
     return res;
   }
 
@@ -128,6 +140,7 @@ public final class NSSummaryCodec implements 
Codec<NSSummary> {
     copy.setFileSizeBucket(object.getFileSizeBucket());
     copy.setChildDir(object.getChildDir());
     copy.setDirName(object.getDirName());
+    copy.setParentId(object.getParentId());
     return copy;
   }
 }
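
Worth noting for the codec change above: parentId is written as a trailing long, and records persisted before this change simply end earlier, so they decode with parentId = -1 (the marker that later triggers the NSSummary rebuild in ReconUtils). A rough round-trip sketch, assuming an NSSummaryCodec instance named codec:

    // Hypothetical round trip; values are illustrative.
    NSSummary summary = new NSSummary();
    summary.setDirName("dir1");
    summary.setParentId(42L);                      // persisted as the trailing long

    byte[] persisted = codec.toPersistedFormat(summary);
    NSSummary decoded = codec.fromPersistedFormat(persisted);
    // decoded.getParentId() == 42 for new-format bytes;
    // an old-format record (no trailing long) would decode with parentId == -1.
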
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconNamespaceSummaryManager.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconNamespaceSummaryManager.java
index 6cb93e7134..ea0ff6ed5d 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconNamespaceSummaryManager.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconNamespaceSummaryManager.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.recon.spi;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
 
 import java.io.IOException;
@@ -45,4 +46,6 @@ public interface ReconNamespaceSummaryManager {
 
   void commitBatchOperation(RDBBatchOperation rdbBatchOperation)
       throws IOException;
+
+  void rebuildNSSummaryTree(OMMetadataManager omMetadataManager);
 }
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconNamespaceSummaryManagerImpl.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconNamespaceSummaryManagerImpl.java
index 42a30095f3..9167854a82 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconNamespaceSummaryManagerImpl.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconNamespaceSummaryManagerImpl.java
@@ -22,8 +22,11 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
 import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
+import org.apache.hadoop.ozone.recon.tasks.NSSummaryTask;
+
 import static 
org.apache.hadoop.ozone.recon.spi.impl.ReconDBProvider.truncateTable;
 
 import javax.inject.Inject;
@@ -39,12 +42,14 @@ public class ReconNamespaceSummaryManagerImpl
 
   private Table<Long, NSSummary> nsSummaryTable;
   private DBStore namespaceDbStore;
+  private NSSummaryTask nsSummaryTask;
 
   @Inject
-  public ReconNamespaceSummaryManagerImpl(ReconDBProvider reconDBProvider)
+  public ReconNamespaceSummaryManagerImpl(ReconDBProvider reconDBProvider, NSSummaryTask nsSummaryTask)
           throws IOException {
     namespaceDbStore = reconDBProvider.getDbStore();
     this.nsSummaryTable = NAMESPACE_SUMMARY.getTable(namespaceDbStore);
+    this.nsSummaryTask = nsSummaryTask;
   }
 
   @Override
@@ -81,6 +86,11 @@ public class ReconNamespaceSummaryManagerImpl
     this.namespaceDbStore.commitBatchOperation(rdbBatchOperation);
   }
 
+  @Override
+  public void rebuildNSSummaryTree(OMMetadataManager omMetadataManager) {
+    nsSummaryTask.reprocess(omMetadataManager);
+  }
+
   public Table getNSSummaryTable() {
     return nsSummaryTable;
   }
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
index f00d83e64a..888ec5319f 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
@@ -132,6 +132,8 @@ public class NSSummaryTaskDbEventHandler {
       curNSSummary = new NSSummary();
     }
     curNSSummary.setDirName(dirName);
+    // Set the parent directory ID
+    curNSSummary.setParentId(parentObjectId);
     nsSummaryMap.put(objectId, curNSSummary);
 
     // Write the child dir list to the parent directory
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
index a88064d565..54da926601 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
@@ -35,6 +35,7 @@ import 
org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -42,12 +43,14 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.recon.ReconConstants;
 import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler;
 import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler;
 import org.apache.hadoop.ozone.recon.api.types.DUResponse;
-import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse;
-import org.apache.hadoop.ozone.recon.api.types.ResponseStatus;
+import org.apache.hadoop.ozone.recon.api.types.NSSummary;
 import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse;
+import org.apache.hadoop.ozone.recon.api.types.ResponseStatus;
+import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse;
 import org.apache.hadoop.ozone.recon.common.CommonUtils;
 import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
 import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
@@ -57,9 +60,12 @@ import 
org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
 import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import 
org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithFSO;
+import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
+import org.mockito.ArgumentCaptor;
+import org.slf4j.Logger;
 
 import javax.ws.rs.core.Response;
 
@@ -74,8 +80,6 @@ import java.util.ArrayList;
 import java.util.Set;
 import java.util.HashSet;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static 
org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
 import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm;
@@ -83,8 +87,12 @@ import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyT
 import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
 import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO;
 import static 
org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.verify;
 
 /**
  * Test for NSSummary REST APIs with FSO.
@@ -114,6 +122,7 @@ public class TestNSSummaryEndpointWithFSO {
   private Path temporaryFolder;
 
   private ReconOMMetadataManager reconOMMetadataManager;
+  private ReconNamespaceSummaryManager reconNamespaceSummaryManager;
   private NSSummaryEndpoint nsSummaryEndpoint;
   private OzoneConfiguration ozoneConfiguration;
   private CommonUtils commonUtils;
@@ -375,7 +384,7 @@ public class TestNSSummaryEndpointWithFSO {
                             mock(StorageContainerServiceProviderImpl.class))
                     .addBinding(NSSummaryEndpoint.class)
                     .build();
-    ReconNamespaceSummaryManager reconNamespaceSummaryManager =
+    this.reconNamespaceSummaryManager =
         reconTestInjector.getInstance(ReconNamespaceSummaryManager.class);
     nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class);
 
@@ -696,6 +705,151 @@ public class TestNSSummaryEndpointWithFSO {
     }
   }
 
+  @Test
+  public void testConstructFullPath() throws IOException {
+    OmKeyInfo keyInfo = new OmKeyInfo.Builder()
+        .setKeyName("file2")
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_ONE)
+        .setObjectID(KEY_TWO_OBJECT_ID)
+        .setParentObjectID(DIR_TWO_OBJECT_ID)
+        .build();
+    // Call constructFullPath and verify the result
+    String fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+    String expectedPath = "vol/bucket1/dir1/dir2/file2";
+    Assertions.assertEquals(expectedPath, fullPath);
+
+    // Create key info for file 3
+    keyInfo = new OmKeyInfo.Builder()
+        .setKeyName("file3")
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_ONE)
+        .setObjectID(KEY_THREE_OBJECT_ID)
+        .setParentObjectID(DIR_THREE_OBJECT_ID)
+        .build();
+    fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+    expectedPath = "vol/bucket1/dir1/dir3/file3";
+    Assertions.assertEquals(expectedPath, fullPath);
+
+    // Create key info for file 6
+    keyInfo = new OmKeyInfo.Builder()
+        .setKeyName("file6")
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_ONE)
+        .setObjectID(KEY_SIX_OBJECT_ID)
+        .setParentObjectID(DIR_FOUR_OBJECT_ID)
+        .build();
+    fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+    expectedPath = "vol/bucket1/dir1/dir4/file6";
+    Assertions.assertEquals(expectedPath, fullPath);
+
+    // Create key info for file 1
+    keyInfo = new OmKeyInfo.Builder()
+        .setKeyName("file1")
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_ONE)
+        .setObjectID(KEY_ONE_OBJECT_ID)
+        .setParentObjectID(BUCKET_ONE_OBJECT_ID)
+        .build();
+    fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+    expectedPath = "vol/bucket1/file1";
+    Assertions.assertEquals(expectedPath, fullPath);
+
+    // Create key info for file 9
+    keyInfo = new OmKeyInfo.Builder()
+        .setKeyName("file9")
+        .setVolumeName(VOL_TWO)
+        .setBucketName(BUCKET_THREE)
+        .setObjectID(KEY_NINE_OBJECT_ID)
+        .setParentObjectID(DIR_FIVE_OBJECT_ID)
+        .build();
+    fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+    expectedPath = "vol2/bucket3/dir5/file9";
+    Assertions.assertEquals(expectedPath, fullPath);
+
+    // Check for when we encounter an NSSummary with parentId -1
+    // Fetch NSSummary for dir1 and immediately update its parentId.
+    NSSummary dir1Summary = reconNamespaceSummaryManager.getNSSummary(DIR_ONE_OBJECT_ID);
+    dir1Summary.setParentId(-1);  // Update parentId to -1
+
+    reconNamespaceSummaryManager.deleteNSSummary(DIR_ONE_OBJECT_ID);
+    reconNamespaceSummaryManager.storeNSSummary(DIR_ONE_OBJECT_ID, dir1Summary);
+
+    NSSummary changedDir1Summary = reconNamespaceSummaryManager.getNSSummary(DIR_ONE_OBJECT_ID);
+    Assertions.assertEquals(-1, changedDir1Summary.getParentId(), "The parentId should be updated to -1");
+
+    keyInfo = new OmKeyInfo.Builder()
+        .setKeyName("file2")
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_ONE)
+        .setObjectID(KEY_TWO_OBJECT_ID)
+        .setParentObjectID(DIR_TWO_OBJECT_ID)
+        .build();
+    // Call constructFullPath and verify the result
+    fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+  }
+
+  @Test
+  public void testConstructFullPathWithNegativeParentIdTriggersRebuild() throws IOException {
+    // Setup
+    long dirOneObjectId = 1L; // Sample object ID for the directory
+    ReconNamespaceSummaryManager mockSummaryManager = mock(ReconNamespaceSummaryManager.class);
+    ReconOMMetadataManager mockMetadataManager = mock(ReconOMMetadataManager.class);
+    NSSummary dir1Summary = new NSSummary();
+    dir1Summary.setParentId(-1); // Simulate directory at the top of the tree
+    when(mockSummaryManager.getNSSummary(dirOneObjectId)).thenReturn(dir1Summary);
+
+    OmKeyInfo keyInfo = new OmKeyInfo.Builder()
+        .setKeyName("file2")
+        .setVolumeName("vol")
+        .setBucketName("bucket1")
+        .setObjectID(2L)
+        .setParentObjectID(dirOneObjectId)
+        .build();
+
+    String result = ReconUtils.constructFullPath(keyInfo, mockSummaryManager, mockMetadataManager);
+    assertEquals("", result, "Expected an empty string return due to rebuild trigger");
+  }
+
+  @Test
+  public void testLoggingWhenParentIdIsNegative() throws IOException {
+    ReconNamespaceSummaryManager mockManager =
+        mock(ReconNamespaceSummaryManager.class);
+    Logger mockLogger = mock(Logger.class);
+    ReconUtils.setLogger(mockLogger);
+
+    NSSummary mockSummary = new NSSummary();
+    mockSummary.setParentId(-1);
+    when(mockManager.getNSSummary(anyLong())).thenReturn(mockSummary);
+
+    OmKeyInfo keyInfo = new OmKeyInfo.Builder()
+        .setKeyName("testKey")
+        .setVolumeName("vol")
+        .setBucketName("bucket")
+        .setObjectID(1L)
+        .setParentObjectID(1L)
+        .build();
+
+    ReconUtils.constructFullPath(keyInfo, mockManager, null);
+
+    // Assert
+    ArgumentCaptor<String> logCaptor = ArgumentCaptor.forClass(String.class);
+    verify(mockLogger).warn(logCaptor.capture());
+    String loggedMessage = logCaptor.getValue();
+
+    // Here we can assert the exact message we expect to see in the logs.
+    assertEquals(
+        "NSSummary tree is currently being rebuilt, returning empty string " +
+            "for path construction.", loggedMessage);
+  }
+
+
   /**
    * Write directories and keys info into OM DB.
    * @throws Exception
@@ -1252,7 +1406,7 @@ public class TestNSSummaryEndpointWithFSO {
   }
 
   private static SCMNodeStat getMockSCMRootStat() {
-    return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, 
+    return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE,
         ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1);
   }
 }
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
index a5064ba5be..dba245ce8b 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -43,6 +44,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.recon.ReconConstants;
 import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler;
 import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler;
 import org.apache.hadoop.ozone.recon.api.types.DUResponse;
@@ -58,6 +60,7 @@ import 
org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
 import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import 
org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithLegacy;
+import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
@@ -115,6 +118,7 @@ public class TestNSSummaryEndpointWithLegacy {
   @TempDir
   private Path temporaryFolder;
 
+  private ReconNamespaceSummaryManager reconNamespaceSummaryManager;
   private ReconOMMetadataManager reconOMMetadataManager;
   private NSSummaryEndpoint nsSummaryEndpoint;
   private OzoneConfiguration conf;
@@ -378,7 +382,7 @@ public class TestNSSummaryEndpointWithLegacy {
                 mock(StorageContainerServiceProviderImpl.class))
             .addBinding(NSSummaryEndpoint.class)
             .build();
-    ReconNamespaceSummaryManager reconNamespaceSummaryManager =
+    this.reconNamespaceSummaryManager =
         reconTestInjector.getInstance(ReconNamespaceSummaryManager.class);
     nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class);
 
@@ -694,6 +698,48 @@ public class TestNSSummaryEndpointWithLegacy {
     }
   }
 
+  @Test
+  public void testConstructFullPath() throws IOException {
+    // For Key Tables the parent object ID is not set hence it
+    // will by default be set as -1 when the NSSummary object is created
+    OmKeyInfo keyInfo = new OmKeyInfo.Builder()
+        .setKeyName("dir1/dir2/file2")
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_ONE)
+        .setObjectID(KEY_TWO_OBJECT_ID)
+        .build();
+    // Call constructFullPath and verify the result
+    String fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+    String expectedPath = "vol/bucket1/dir1/dir2/file2";
+    Assertions.assertEquals(expectedPath, fullPath);
+
+    // Create key info for file 3
+    keyInfo = new OmKeyInfo.Builder()
+        .setKeyName("dir1/dir2/")
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_ONE)
+        .setObjectID(DIR_TWO_OBJECT_ID)
+        .build();
+    fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+    expectedPath = "vol/bucket1/dir1/dir2/";
+    Assertions.assertEquals(expectedPath, fullPath);
+
+    // Create key info for file 6
+    keyInfo = new OmKeyInfo.Builder()
+        .setKeyName("dir1/dir4/file6")
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_ONE)
+        .setObjectID(KEY_SIX_OBJECT_ID)
+        .build();
+    fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+    expectedPath = "vol/bucket1/dir1/dir4/file6";
+    Assertions.assertEquals(expectedPath, fullPath);
+  }
+
+
   /**
    * Write directories and keys info into OM DB.
    * @throws Exception
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
index ce8aa72963..6a2f2c557d 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
@@ -38,13 +38,15 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.recon.ReconConstants;
 import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler;
 import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler;
 import org.apache.hadoop.ozone.recon.api.types.BucketObjectDBInfo;
@@ -65,6 +67,7 @@ import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithLegacy;
 import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS;
+import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
@@ -109,25 +112,26 @@ import static org.mockito.Mockito.when;
  * .
  * └── vol
  *     ├── bucket1 (OBS)
- *     │   ├── file1
- *     │   ├── file2
- *     │   └── file3
+ *     │   ├── KEY_ONE
+ *     │   ├── KEY_TWO
+ *     │   └── KEY_THREE
  *     └── bucket2 (OBS)
- *         ├── file4
- *         └── file5
+ *         ├── KEY_FOUR
+ *         └── KEY_FIVE
  * └── vol2
  *     ├── bucket3 (Legacy)
- *     │   ├── file8
- *     │   ├── file9
- *     │   └── file10
+ *     │   ├── KEY_EIGHT
+ *     │   ├── KEY_NINE
+ *     │   └── KEY_TEN
  *     └── bucket4 (Legacy)
- *         └── file11
+ *         └── KEY_ELEVEN
  */
 public class TestNSSummaryEndpointWithOBSAndLegacy {
   @TempDir
   private Path temporaryFolder;
 
   private ReconOMMetadataManager reconOMMetadataManager;
+  private ReconNamespaceSummaryManager reconNamespaceSummaryManager;
   private NSSummaryEndpoint nsSummaryEndpoint;
   private OzoneConfiguration conf;
   private CommonUtils commonUtils;
@@ -374,7 +378,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
                 mock(StorageContainerServiceProviderImpl.class))
             .addBinding(NSSummaryEndpoint.class)
             .build();
-    ReconNamespaceSummaryManager reconNamespaceSummaryManager =
+    reconNamespaceSummaryManager =
         reconTestInjector.getInstance(ReconNamespaceSummaryManager.class);
     nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class);
 
@@ -904,6 +908,54 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
         OmUtils.normalizePathUptoBucket("volume/bucket/key$%#1/./////////key$%#2"));
   }
 
+  @Test
+  public void testConstructFullPath() throws IOException {
+    OmKeyInfo keyInfo = new OmKeyInfo.Builder()
+        .setKeyName(KEY_TWO)
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_ONE)
+        .setObjectID(KEY_TWO_OBJECT_ID)
+        .build();
+    String fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+    String expectedPath = "vol/bucket1/" + KEY_TWO;
+    Assertions.assertEquals(expectedPath, fullPath);
+
+    keyInfo = new OmKeyInfo.Builder()
+        .setKeyName(KEY_FIVE)
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_TWO)
+        .setObjectID(KEY_FIVE_OBJECT_ID)
+        .build();
+    fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+    expectedPath = "vol/bucket2/" + KEY_FIVE;
+    Assertions.assertEquals(expectedPath, fullPath);
+
+    keyInfo = new OmKeyInfo.Builder()
+        .setKeyName(KEY_EIGHT)
+        .setVolumeName(VOL_TWO)
+        .setBucketName(BUCKET_THREE)
+        .setObjectID(KEY_EIGHT_OBJECT_ID)
+        .build();
+    fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+    expectedPath = "vol2/bucket3/" + KEY_EIGHT;
+    Assertions.assertEquals(expectedPath, fullPath);
+
+
+    keyInfo = new OmKeyInfo.Builder()
+        .setKeyName(KEY_ELEVEN)
+        .setVolumeName(VOL_TWO)
+        .setBucketName(BUCKET_FOUR)
+        .setObjectID(KEY_ELEVEN_OBJECT_ID)
+        .build();
+    fullPath = ReconUtils.constructFullPath(keyInfo,
+        reconNamespaceSummaryManager, reconOMMetadataManager);
+    expectedPath = "vol2/bucket4/" + KEY_ELEVEN;
+    Assertions.assertEquals(expectedPath, fullPath);
+  }
+
 
   /**
    * Testing the following case.
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
index fbddd50ee4..f0af066c46 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
@@ -114,9 +114,9 @@ public class TestReconNamespaceSummaryManagerImpl {
 
   private void putThreeNSMetadata() throws IOException {
     HashMap<Long, NSSummary> hmap = new HashMap<>();
-    hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1"));
-    hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2"));
-    hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3"));
+    hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1", -1));
+    hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2", -1));
+    hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3", -1));
     RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
     for (Map.Entry entry: hmap.entrySet()) {
       reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
index 66c522cb4d..ba2e749741 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
@@ -52,8 +52,8 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirT
 import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 /**
  * Test for NSSummaryTaskWithFSO.
@@ -270,6 +270,37 @@ public final class TestNSSummaryTaskWithFSO {
       assertEquals(DIR_ONE, nsSummaryInDir1.getDirName());
       assertEquals(DIR_TWO, nsSummaryInDir2.getDirName());
     }
+
+    @Test
+    public void testDirectoryParentIdAssignment() throws Exception {
+      // Trigger reprocess to simulate reading from OM DB and processing into NSSummary.
+      nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
+
+      // Fetch NSSummary for DIR_ONE and verify its parent ID matches BUCKET_ONE_OBJECT_ID.
+      NSSummary nsSummaryDirOne =
+          reconNamespaceSummaryManager.getNSSummary(DIR_ONE_OBJECT_ID);
+      assertNotNull(nsSummaryDirOne,
+          "NSSummary for DIR_ONE should not be null.");
+      assertEquals(BUCKET_ONE_OBJECT_ID, nsSummaryDirOne.getParentId(),
+          "DIR_ONE's parent ID should match BUCKET_ONE_OBJECT_ID.");
+
+      // Fetch NSSummary for DIR_TWO and verify its parent ID matches DIR_ONE_OBJECT_ID.
+      NSSummary nsSummaryDirTwo =
+          reconNamespaceSummaryManager.getNSSummary(DIR_TWO_OBJECT_ID);
+      assertNotNull(nsSummaryDirTwo,
+          "NSSummary for DIR_TWO should not be null.");
+      assertEquals(DIR_ONE_OBJECT_ID, nsSummaryDirTwo.getParentId(),
+          "DIR_TWO's parent ID should match DIR_ONE_OBJECT_ID.");
+
+      // Fetch NSSummary for DIR_THREE and verify its parent ID matches DIR_ONE_OBJECT_ID.
+      NSSummary nsSummaryDirThree =
+          reconNamespaceSummaryManager.getNSSummary(DIR_THREE_OBJECT_ID);
+      assertNotNull(nsSummaryDirThree,
+          "NSSummary for DIR_THREE should not be null.");
+      assertEquals(DIR_ONE_OBJECT_ID, nsSummaryDirThree.getParentId(),
+          "DIR_THREE's parent ID should match DIR_ONE_OBJECT_ID.");
+    }
+
   }
 
   /**
@@ -462,6 +493,27 @@ public final class TestNSSummaryTaskWithFSO {
       // after renaming dir1, check its new name
       assertEquals(DIR_ONE_RENAME, nsSummaryForDir1.getDirName());
     }
+
+    @Test
+    public void testParentIdAfterProcessEventBatch() throws IOException {
+
+      // Verify the parent ID of DIR_FOUR after it's added under BUCKET_ONE.
+      NSSummary nsSummaryDirFour =
+          reconNamespaceSummaryManager.getNSSummary(DIR_FOUR_OBJECT_ID);
+      assertNotNull(nsSummaryDirFour,
+          "NSSummary for DIR_FOUR should not be null.");
+      assertEquals(BUCKET_ONE_OBJECT_ID, nsSummaryDirFour.getParentId(),
+          "DIR_FOUR's parent ID should match BUCKET_ONE_OBJECT_ID.");
+
+      // Verify the parent ID of DIR_FIVE after it's added under BUCKET_TWO.
+      NSSummary nsSummaryDirFive =
+          reconNamespaceSummaryManager.getNSSummary(DIR_FIVE_OBJECT_ID);
+      assertNotNull(nsSummaryDirFive,
+          "NSSummary for DIR_FIVE should not be null.");
+      assertEquals(BUCKET_TWO_OBJECT_ID, nsSummaryDirFive.getParentId(),
+          "DIR_FIVE's parent ID should match BUCKET_TWO_OBJECT_ID.");
+    }
+
   }
 
   /**


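The parent-ID assertions in the tests above reflect the parentId field that NSSummary now carries (visible as the extra constructor argument in TestReconNamespaceSummaryManagerImpl, with -1 as the unset default). The following is a rough, self-contained sketch of how a directory put could record that relationship; it is purely illustrative and not the actual NSSummaryTaskDbEventHandler logic, with ParentIdSketch and Row being hypothetical stand-ins and the Map standing in for the NSSummary table.

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public final class ParentIdSketch {

      /** Minimal stand-in for an NSSummary row. */
      static final class Row {
        String dirName = "";
        long parentId = -1;                        // -1 until a parent is known
        final Set<Long> childDirs = new HashSet<>();
      }

      /** Record a directory under its parent and remember the parent's object ID. */
      static void handleDirectoryPut(long objectId, long parentObjectId, String dirName,
          Map<Long, Row> nsSummaryTable) {
        Row child = nsSummaryTable.computeIfAbsent(objectId, id -> new Row());
        child.dirName = dirName;
        child.parentId = parentObjectId;           // the value the new tests assert on
        Row parent = nsSummaryTable.computeIfAbsent(parentObjectId, id -> new Row());
        parent.childDirs.add(objectId);
      }

      public static void main(String[] args) {
        Map<Long, Row> table = new HashMap<>();
        long bucketOneId = 1L;
        long dirOneId = 10L;
        long dirTwoId = 20L;
        handleDirectoryPut(dirOneId, bucketOneId, "dir1", table);
        handleDirectoryPut(dirTwoId, dirOneId, "dir2", table);
        // dir1's parent is the bucket, dir2's parent is dir1, mirroring the assertions above.
        System.out.println(table.get(dirOneId).parentId);  // prints 1
        System.out.println(table.get(dirTwoId).parentId);  // prints 10
      }
    }
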
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
