This is an automated email from the ASF dual-hosted git repository.

devesh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new cb5d51983d HDDS-9534. Support namespace summaries (du, dist & counts) 
for LEGACY buckets with file system disabled (#5517)
cb5d51983d is described below

commit cb5d51983d044734a714f32a32255dc9604eb701
Author: Arafat2198 <[email protected]>
AuthorDate: Fri Mar 29 22:28:08 2024 +0530

    HDDS-9534. Support namespace summaries (du, dist & counts) for LEGACY 
buckets with file system disabled (#5517)
---
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |  41 ++
 .../ozone/recon/api/handlers/BucketHandler.java    |  35 +-
 .../ozone/recon/api/handlers/EntityHandler.java    | 103 +++-
 .../recon/recovery/ReconOMMetadataManager.java     |   8 +
 .../recon/recovery/ReconOmMetadataManagerImpl.java |   5 +
 .../hadoop/ozone/recon/tasks/NSSummaryTask.java    |   3 +-
 .../ozone/recon/tasks/NSSummaryTaskWithLegacy.java | 308 +++++++-----
 .../ozone/recon/OMMetadataManagerTestUtils.java    |  16 +-
 .../recon/api/TestNSSummaryEndpointWithLegacy.java |  11 +-
 ... => TestNSSummaryEndpointWithOBSAndLegacy.java} | 445 ++++++++++++++---
 .../TestNSSummaryTaskWithLegacyOBSLayout.java      | 554 +++++++++++++++++++++
 11 files changed, 1303 insertions(+), 226 deletions(-)

diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index d58d922b0e..b4777c7a01 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -743,6 +743,47 @@ public final class OmUtils {
     return keyName;
   }
 
+  /**
+   * Normalizes a given path up to the bucket level.
+   *
+   * This method takes a path as input and normalizes it up to the bucket level.
+   * It handles empty paths, removes leading slashes, and splits the path into
+   * segments. It then extracts the volume and bucket names, forming a
+   * normalized path with a single slash. Finally, any remaining segments are
+   * joined as the key name, returning the complete standardized path.
+   *
+   * @param path The path string to be normalized.
+   * @return The normalized path string.
+   */
+  public static String normalizePathUptoBucket(String path) {
+    if (path == null || path.isEmpty()) {
+      return OM_KEY_PREFIX; // Handle empty path
+    }
+
+    // Remove leading slashes
+    path = path.replaceAll("^/*", "");
+
+    String[] segments = path.split(OM_KEY_PREFIX, -1);
+
+    String volumeName = segments[0];
+    String bucketName = segments.length > 1 ? segments[1] : "";
+
+    // Combine volume and bucket.
+    StringBuilder normalizedPath = new StringBuilder(volumeName);
+    if (!bucketName.isEmpty()) {
+      normalizedPath.append(OM_KEY_PREFIX).append(bucketName);
+    }
+
+    // Add remaining segments as the key
+    if (segments.length > 2) {
+      normalizedPath.append(OM_KEY_PREFIX).append(
+          String.join(OM_KEY_PREFIX,
+              Arrays.copyOfRange(segments, 2, segments.length)));
+    }
+
+    return normalizedPath.toString();
+  }
+
 
   /**
    * For a given service ID, return list of configured OM hosts.
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
index 34dcba40f8..266caaa2d8 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
@@ -17,9 +17,11 @@
  */
 package org.apache.hadoop.ozone.recon.api.handlers;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -163,6 +165,8 @@ public abstract class BucketHandler {
                 ReconOMMetadataManager omMetadataManager,
                 OzoneStorageContainerManager reconSCM,
                 OmBucketInfo bucketInfo) throws IOException {
+    // Check if enableFileSystemPaths flag is set to true.
+    boolean enableFileSystemPaths = isEnableFileSystemPaths(omMetadataManager);
 
     // If bucketInfo is null then entity type is UNKNOWN
     if (Objects.isNull(bucketInfo)) {
@@ -172,10 +176,17 @@ public abstract class BucketHandler {
           .equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
         return new FSOBucketHandler(reconNamespaceSummaryManager,
             omMetadataManager, reconSCM, bucketInfo);
-      } else if (bucketInfo.getBucketLayout()
-          .equals(BucketLayout.LEGACY)) {
-        return new LegacyBucketHandler(reconNamespaceSummaryManager,
-            omMetadataManager, reconSCM, bucketInfo);
+      } else if (bucketInfo.getBucketLayout().equals(BucketLayout.LEGACY)) {
+        // Choose handler based on enableFileSystemPaths flag for legacy 
layout.
+        // If enableFileSystemPaths is false, then the legacy bucket is treated
+        // as an OBS bucket.
+        if (enableFileSystemPaths) {
+          return new LegacyBucketHandler(reconNamespaceSummaryManager,
+              omMetadataManager, reconSCM, bucketInfo);
+        } else {
+          return new OBSBucketHandler(reconNamespaceSummaryManager,
+              omMetadataManager, reconSCM, bucketInfo);
+        }
       } else if (bucketInfo.getBucketLayout()
           .equals(BucketLayout.OBJECT_STORE)) {
         return new OBSBucketHandler(reconNamespaceSummaryManager,
@@ -188,6 +199,22 @@ public abstract class BucketHandler {
     }
   }
 
+  /**
+   * Determines whether FileSystemPaths are enabled for Legacy Buckets
+   * based on the Ozone configuration.
+   *
+   * @param omMetadataManager the ReconOMMetadataManager instance
+   * @return True if FileSystemPaths are enabled, false otherwise.
+   */
+  private static boolean isEnableFileSystemPaths(ReconOMMetadataManager 
omMetadataManager) {
+    OzoneConfiguration configuration = 
omMetadataManager.getOzoneConfiguration();
+    if (configuration == null) {
+      configuration = new OzoneConfiguration();
+    }
+    return 
configuration.getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+        OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
+  }
+
   public static BucketHandler getBucketHandler(
       ReconNamespaceSummaryManager reconNamespaceSummaryManager,
       ReconOMMetadataManager omMetadataManager,
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
index d12c7b6545..4f9e68ddff 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.recon.api.handlers;
 
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.recon.ReconConstants;
 import org.apache.hadoop.ozone.recon.api.types.NamespaceSummaryResponse;
 import org.apache.hadoop.ozone.recon.api.types.DUResponse;
@@ -60,9 +61,18 @@ public abstract class EntityHandler {
     this.omMetadataManager = omMetadataManager;
     this.reconSCM = reconSCM;
     this.bucketHandler = bucketHandler;
-    normalizedPath = normalizePath(path);
-    names = parseRequestPath(normalizedPath);
 
+    // Defaulting to FILE_SYSTEM_OPTIMIZED if bucketHandler is null
+    BucketLayout layout =
+        (bucketHandler != null) ? bucketHandler.getBucketLayout() :
+            BucketLayout.FILE_SYSTEM_OPTIMIZED;
+
+    // Normalize the path based on the determined layout
+    normalizedPath = normalizePath(path, layout);
+
+    // Choose the parsing method based on the bucket layout
+    names = (layout == BucketLayout.OBJECT_STORE) ?
+        parseObjectStorePath(normalizedPath) : 
parseRequestPath(normalizedPath);
   }
 
   public abstract NamespaceSummaryResponse getSummaryResponse()
@@ -118,7 +128,8 @@ public abstract class EntityHandler {
           String path) throws IOException {
     BucketHandler bucketHandler;
 
-    String normalizedPath = normalizePath(path);
+    String normalizedPath =
+        normalizePath(path, BucketLayout.FILE_SYSTEM_OPTIMIZED);
     String[] names = parseRequestPath(normalizedPath);
     if (path.equals(OM_KEY_PREFIX)) {
       return EntityType.ROOT.create(reconNamespaceSummaryManager,
@@ -156,23 +167,36 @@ public abstract class EntityHandler {
       String volName = names[0];
       String bucketName = names[1];
 
-      String keyName = BucketHandler.getKeyName(names);
-
+      // Assuming getBucketHandler already validates volume and bucket 
existence
       bucketHandler = BucketHandler.getBucketHandler(
-              reconNamespaceSummaryManager,
-              omMetadataManager, reconSCM,
-              volName, bucketName);
+          reconNamespaceSummaryManager, omMetadataManager, reconSCM, volName,
+          bucketName);
 
-      // check if either volume or bucket doesn't exist
-      if (bucketHandler == null
-          || !omMetadataManager.volumeExists(volName)
-          || !bucketHandler.bucketExists(volName, bucketName)) {
+      if (bucketHandler == null) {
         return EntityType.UNKNOWN.create(reconNamespaceSummaryManager,
-                omMetadataManager, reconSCM, null, path);
+            omMetadataManager, reconSCM, null, path);
+      }
+
+      // Directly handle path normalization and parsing based on the layout
+      if (bucketHandler.getBucketLayout() == BucketLayout.OBJECT_STORE) {
+        String[] parsedObjectLayoutPath = parseObjectStorePath(
+            normalizePath(path, bucketHandler.getBucketLayout()));
+        if (parsedObjectLayoutPath == null) {
+          return EntityType.UNKNOWN.create(reconNamespaceSummaryManager,
+              omMetadataManager, reconSCM, null, path);
+        }
+        // Use the key part directly from the parsed path
+        return bucketHandler.determineKeyPath(parsedObjectLayoutPath[2])
+            .create(reconNamespaceSummaryManager, omMetadataManager, reconSCM,
+                bucketHandler, path);
+      } else {
+        // Use the existing names array for non-OBJECT_STORE layouts to derive
+        // the keyName
+        String keyName = BucketHandler.getKeyName(names);
+        return bucketHandler.determineKeyPath(keyName)
+            .create(reconNamespaceSummaryManager, omMetadataManager, reconSCM,
+                bucketHandler, path);
       }
-      return bucketHandler.determineKeyPath(keyName)
-          .create(reconNamespaceSummaryManager,
-          omMetadataManager, reconSCM, bucketHandler, path);
     }
   }
 
@@ -256,7 +280,52 @@ public abstract class EntityHandler {
     return names;
   }
 
-  private static String normalizePath(String path) {
+  /**
+   * Splits an object store path into volume, bucket, and key name components.
+   *
+   * This method parses a path of the format "/volumeName/bucketName/keyName",
+   * including paths with additional '/' characters within the key name. It's
+   * designed for object store paths where the first three '/' characters
+   * separate the root, volume and bucket names from the key name.
+   *
+   * @param path The object store path to parse, starting with a slash.
+   * @return A String array with three elements: volume name, bucket name, and
+   * key name, or {null} if the path format is invalid.
+   */
+  public static String[] parseObjectStorePath(String path) {
+    // Removing the leading slash for correct splitting
+    path = path.substring(1);
+
+    // Splitting the modified path by "/", limiting to 3 parts
+    String[] parts = path.split("/", 3);
+
+    // Checking if we correctly obtained 3 parts after removing the leading 
slash
+    if (parts.length <= 3) {
+      return parts;
+    } else {
+      return null;
+    }
+  }
+
+  /**
+   * Normalizes a given path based on the specified bucket layout.
+   *
+   * This method adjusts the path according to the bucket layout.
+   * For {OBJECT_STORE Layout}, it normalizes the path up to the bucket level
+   * using OmUtils.normalizePathUptoBucket. For other layouts, it
+   * normalizes the entire path, including the key, using
+   * OmUtils.normalizeKey, and does not preserve any trailing slashes.
+   * The normalized path will always be prefixed with OM_KEY_PREFIX to ensure 
it
+   * is consistent with the expected format for object storage paths in Ozone.
+   *
+   * @param path
+   * @param bucketLayout
+   * @return A normalized path
+   */
+  private static String normalizePath(String path, BucketLayout bucketLayout) {
+    if (bucketLayout == BucketLayout.OBJECT_STORE) {
+      return OM_KEY_PREFIX + OmUtils.normalizePathUptoBucket(path);
+    }
     return OM_KEY_PREFIX + OmUtils.normalizeKey(path, false);
   }
 }
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
index 2040b7b343..1fc114eabd 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
@@ -22,6 +22,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
@@ -105,4 +106,11 @@ public interface ReconOMMetadataManager extends 
OMMetadataManager {
    */
   List<OmBucketInfo> listBucketsUnderVolume(
       String volumeName) throws IOException;
+
+  /**
+   * Return the OzoneConfiguration instance used by Recon.
+   * @return the OzoneConfiguration instance used by Recon
+   */
+  OzoneConfiguration getOzoneConfiguration();
+
 }
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
index ad0526363d..4b041f6511 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
@@ -291,6 +291,11 @@ public class ReconOmMetadataManagerImpl extends 
OmMetadataManagerImpl
         Integer.MAX_VALUE);
   }
 
+  @Override
+  public OzoneConfiguration getOzoneConfiguration() {
+    return ozoneConfiguration;
+  }
+
   private List<OmBucketInfo> listAllBuckets(final int maxNumberOfBuckets)
       throws IOException {
     List<OmBucketInfo> result = new ArrayList<>();
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
index 5c33950844..30fdb7c129 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
@@ -63,7 +63,7 @@ import java.util.concurrent.TimeUnit;
  */
 public class NSSummaryTask implements ReconOmTask {
   private static final Logger LOG =
-          LoggerFactory.getLogger(NSSummaryTask.class);
+      LoggerFactory.getLogger(NSSummaryTask.class);
 
   private final ReconNamespaceSummaryManager reconNamespaceSummaryManager;
   private final ReconOMMetadataManager reconOMMetadataManager;
@@ -173,4 +173,3 @@ public class NSSummaryTask implements ReconOmTask {
   }
 
 }
-
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
index ec1ccd0542..4555b976ff 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
@@ -47,7 +47,7 @@ import static 
org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
  */
 public class NSSummaryTaskWithLegacy extends NSSummaryTaskDbEventHandler {
 
-  private static final BucketLayout BUCKET_LAYOUT = BucketLayout.LEGACY;
+  private static final BucketLayout LEGACY_BUCKET_LAYOUT = BucketLayout.LEGACY;
 
   private static final Logger LOG =
       LoggerFactory.getLogger(NSSummaryTaskWithLegacy.class);
@@ -71,16 +71,17 @@ public class NSSummaryTaskWithLegacy extends 
NSSummaryTaskDbEventHandler {
   public boolean processWithLegacy(OMUpdateEventBatch events) {
     Iterator<OMDBUpdateEvent> eventIterator = events.getIterator();
     Map<Long, NSSummary> nsSummaryMap = new HashMap<>();
+    ReconOMMetadataManager metadataManager = getReconOMMetadataManager();
 
     while (eventIterator.hasNext()) {
-      OMDBUpdateEvent<String, ? extends
-          WithParentObjectId> omdbUpdateEvent = eventIterator.next();
+      OMDBUpdateEvent<String, ? extends WithParentObjectId> omdbUpdateEvent =
+          eventIterator.next();
       OMDBUpdateEvent.OMDBUpdateAction action = omdbUpdateEvent.getAction();
 
       // we only process updates on OM's KeyTable
       String table = omdbUpdateEvent.getTable();
-      boolean updateOnKeyTable = table.equals(KEY_TABLE);
-      if (!updateOnKeyTable) {
+
+      if (!table.equals(KEY_TABLE)) {
         continue;
       }
 
@@ -90,102 +91,26 @@ public class NSSummaryTaskWithLegacy extends 
NSSummaryTaskDbEventHandler {
         OMDBUpdateEvent<String, ?> keyTableUpdateEvent = omdbUpdateEvent;
         Object value = keyTableUpdateEvent.getValue();
         Object oldValue = keyTableUpdateEvent.getOldValue();
+
         if (!(value instanceof OmKeyInfo)) {
           LOG.warn("Unexpected value type {} for key {}. Skipping processing.",
               value.getClass().getName(), updatedKey);
           continue;
         }
+
         OmKeyInfo updatedKeyInfo = (OmKeyInfo) value;
         OmKeyInfo oldKeyInfo = (OmKeyInfo) oldValue;
 
-        // KeyTable entries belong to both Legacy and OBS buckets.
-        // Check bucket layout and if it's OBS
-        // continue to the next iteration.
-        // Check just for the current KeyInfo.
-        String volumeName = updatedKeyInfo.getVolumeName();
-        String bucketName = updatedKeyInfo.getBucketName();
-        String bucketDBKey = getReconOMMetadataManager()
-            .getBucketKey(volumeName, bucketName);
-        // Get bucket info from bucket table
-        OmBucketInfo omBucketInfo = getReconOMMetadataManager()
-            .getBucketTable().getSkipCache(bucketDBKey);
-
-        if (omBucketInfo.getBucketLayout()
-            .isObjectStore(enableFileSystemPaths)) {
+        if (!isBucketLayoutValid(metadataManager, updatedKeyInfo)) {
           continue;
         }
 
-        setKeyParentID(updatedKeyInfo);
-
-        if (!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) {
-          switch (action) {
-          case PUT:
-            handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
-            break;
-
-          case DELETE:
-            handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap);
-            break;
-
-          case UPDATE:
-            if (oldKeyInfo != null) {
-              // delete first, then put
-              setKeyParentID(oldKeyInfo);
-              handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap);
-            } else {
-              LOG.warn("Update event does not have the old keyInfo for {}.",
-                  updatedKey);
-            }
-            handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
-            break;
-
-          default:
-            LOG.debug("Skipping DB update event : {}",
-                omdbUpdateEvent.getAction());
-          }
+        if (enableFileSystemPaths) {
+          processWithFileSystemLayout(updatedKeyInfo, oldKeyInfo, action,
+              nsSummaryMap);
         } else {
-          OmDirectoryInfo updatedDirectoryInfo =
-              new OmDirectoryInfo.Builder()
-                  .setName(updatedKeyInfo.getKeyName())
-                  .setObjectID(updatedKeyInfo.getObjectID())
-                  .setParentObjectID(updatedKeyInfo.getParentObjectID())
-                  .build();
-
-          OmDirectoryInfo oldDirectoryInfo = null;
-
-          if (oldKeyInfo != null) {
-            oldDirectoryInfo =
-                new OmDirectoryInfo.Builder()
-                    .setName(oldKeyInfo.getKeyName())
-                    .setObjectID(oldKeyInfo.getObjectID())
-                    .setParentObjectID(oldKeyInfo.getParentObjectID())
-                    .build();
-          }
-
-          switch (action) {
-          case PUT:
-            handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap);
-            break;
-
-          case DELETE:
-            handleDeleteDirEvent(updatedDirectoryInfo, nsSummaryMap);
-            break;
-
-          case UPDATE:
-            if (oldDirectoryInfo != null) {
-              // delete first, then put
-              handleDeleteDirEvent(oldDirectoryInfo, nsSummaryMap);
-            } else {
-              LOG.warn("Update event does not have the old dirInfo for {}.",
-                  updatedKey);
-            }
-            handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap);
-            break;
-
-          default:
-            LOG.debug("Skipping DB update event : {}",
-                omdbUpdateEvent.getAction());
-          }
+          processWithObjectStoreLayout(updatedKeyInfo, oldKeyInfo, action,
+              nsSummaryMap);
         }
       } catch (IOException ioEx) {
         LOG.error("Unable to process Namespace Summary data in Recon DB. ",
@@ -206,12 +131,118 @@ public class NSSummaryTaskWithLegacy extends 
NSSummaryTaskDbEventHandler {
     return true;
   }
 
+  private void processWithFileSystemLayout(OmKeyInfo updatedKeyInfo,
+                                           OmKeyInfo oldKeyInfo,
+                                           OMDBUpdateEvent.OMDBUpdateAction 
action,
+                                           Map<Long, NSSummary> nsSummaryMap)
+      throws IOException {
+    setKeyParentID(updatedKeyInfo);
+
+    if (!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) {
+      switch (action) {
+      case PUT:
+        handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
+        break;
+
+      case DELETE:
+        handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap);
+        break;
+
+      case UPDATE:
+        if (oldKeyInfo != null) {
+          setKeyParentID(oldKeyInfo);
+          handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap);
+        } else {
+          LOG.warn("Update event does not have the old keyInfo for {}.",
+              updatedKeyInfo.getKeyName());
+        }
+        handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
+        break;
+
+      default:
+        LOG.debug("Skipping DB update event for Key: {}", action);
+      }
+    } else {
+      OmDirectoryInfo updatedDirectoryInfo = new OmDirectoryInfo.Builder()
+          .setName(updatedKeyInfo.getKeyName())
+          .setObjectID(updatedKeyInfo.getObjectID())
+          .setParentObjectID(updatedKeyInfo.getParentObjectID())
+          .build();
+
+      OmDirectoryInfo oldDirectoryInfo = null;
+
+      if (oldKeyInfo != null) {
+        oldDirectoryInfo =
+            new OmDirectoryInfo.Builder()
+                .setName(oldKeyInfo.getKeyName())
+                .setObjectID(oldKeyInfo.getObjectID())
+                .setParentObjectID(oldKeyInfo.getParentObjectID())
+                .build();
+      }
+
+      switch (action) {
+      case PUT:
+        handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap);
+        break;
+
+      case DELETE:
+        handleDeleteDirEvent(updatedDirectoryInfo, nsSummaryMap);
+        break;
+
+      case UPDATE:
+        if (oldDirectoryInfo != null) {
+          handleDeleteDirEvent(oldDirectoryInfo, nsSummaryMap);
+        } else {
+          LOG.warn("Update event does not have the old dirInfo for {}.",
+              updatedKeyInfo.getKeyName());
+        }
+        handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap);
+        break;
+
+      default:
+        LOG.debug("Skipping DB update event for Directory: {}", action);
+      }
+    }
+  }
+
+  private void processWithObjectStoreLayout(OmKeyInfo updatedKeyInfo,
+                                            OmKeyInfo oldKeyInfo,
+                                            OMDBUpdateEvent.OMDBUpdateAction 
action,
+                                            Map<Long, NSSummary> nsSummaryMap)
+      throws IOException {
+    setParentBucketId(updatedKeyInfo);
+
+    switch (action) {
+    case PUT:
+      handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
+      break;
+
+    case DELETE:
+      handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap);
+      break;
+
+    case UPDATE:
+      if (oldKeyInfo != null) {
+        setParentBucketId(oldKeyInfo);
+        handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap);
+      } else {
+        LOG.warn("Update event does not have the old keyInfo for {}.",
+            updatedKeyInfo.getKeyName());
+      }
+      handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
+      break;
+
+    default:
+      LOG.debug("Skipping DB update event for Key: {}", action);
+    }
+  }
+
   public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) {
     Map<Long, NSSummary> nsSummaryMap = new HashMap<>();
 
     try {
       Table<String, OmKeyInfo> keyTable =
-          omMetadataManager.getKeyTable(BUCKET_LAYOUT);
+          omMetadataManager.getKeyTable(LEGACY_BUCKET_LAYOUT);
 
       try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
           keyTableIter = keyTable.iterator()) {
@@ -223,30 +254,29 @@ public class NSSummaryTaskWithLegacy extends 
NSSummaryTaskDbEventHandler {
           // KeyTable entries belong to both Legacy and OBS buckets.
           // Check bucket layout and if it's OBS
           // continue to the next iteration.
-          String volumeName = keyInfo.getVolumeName();
-          String bucketName = keyInfo.getBucketName();
-          String bucketDBKey = omMetadataManager
-              .getBucketKey(volumeName, bucketName);
-          // Get bucket info from bucket table
-          OmBucketInfo omBucketInfo = omMetadataManager
-              .getBucketTable().getSkipCache(bucketDBKey);
-
-          if (omBucketInfo.getBucketLayout()
-              .isObjectStore(enableFileSystemPaths)) {
+          if (!isBucketLayoutValid((ReconOMMetadataManager) omMetadataManager,
+              keyInfo)) {
             continue;
           }
 
-          setKeyParentID(keyInfo);
-
-          if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) {
-            OmDirectoryInfo directoryInfo =
-                new OmDirectoryInfo.Builder()
-                    .setName(keyInfo.getKeyName())
-                    .setObjectID(keyInfo.getObjectID())
-                    .setParentObjectID(keyInfo.getParentObjectID())
-                    .build();
-            handlePutDirEvent(directoryInfo, nsSummaryMap);
+          if (enableFileSystemPaths) {
+            // The LEGACY bucket is a file system bucket.
+            setKeyParentID(keyInfo);
+
+            if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) {
+              OmDirectoryInfo directoryInfo =
+                  new OmDirectoryInfo.Builder()
+                      .setName(keyInfo.getKeyName())
+                      .setObjectID(keyInfo.getObjectID())
+                      .setParentObjectID(keyInfo.getParentObjectID())
+                      .build();
+              handlePutDirEvent(directoryInfo, nsSummaryMap);
+            } else {
+              handlePutKeyEvent(keyInfo, nsSummaryMap);
+            }
           } else {
+            // The LEGACY bucket is an object store bucket.
+            setParentBucketId(keyInfo);
             handlePutKeyEvent(keyInfo, nsSummaryMap);
           }
           if (!checkAndCallFlushToDB(nsSummaryMap)) {
@@ -290,7 +320,7 @@ public class NSSummaryTaskWithLegacy extends 
NSSummaryTaskDbEventHandler {
           getReconOMMetadataManager().getOzoneKey(keyInfo.getVolumeName(),
               keyInfo.getBucketName(), parentKeyName);
       OmKeyInfo parentKeyInfo = getReconOMMetadataManager()
-          .getKeyTable(BUCKET_LAYOUT)
+          .getKeyTable(LEGACY_BUCKET_LAYOUT)
           .getSkipCache(fullParentKeyName);
 
       if (parentKeyInfo != null) {
@@ -300,17 +330,53 @@ public class NSSummaryTaskWithLegacy extends 
NSSummaryTaskDbEventHandler {
             "NSSummaryTaskWithLegacy is null");
       }
     } else {
-      String bucketKey = getReconOMMetadataManager()
-          .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName());
-      OmBucketInfo parentBucketInfo =
-          getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey);
+      setParentBucketId(keyInfo);
+    }
+  }
 
-      if (parentBucketInfo != null) {
-        keyInfo.setParentObjectID(parentBucketInfo.getObjectID());
-      } else {
-        throw new IOException("ParentKeyInfo for " +
-            "NSSummaryTaskWithLegacy is null");
-      }
+  /**
+   * Set the parent object ID for a bucket.
+   * @param keyInfo the key whose parent object ID is set to its bucket's object ID
+   * @throws IOException if the parent bucket info cannot be found
+   */
+  private void setParentBucketId(OmKeyInfo keyInfo)
+      throws IOException {
+    String bucketKey = getReconOMMetadataManager()
+        .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName());
+    OmBucketInfo parentBucketInfo =
+        getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey);
+
+    if (parentBucketInfo != null) {
+      keyInfo.setParentObjectID(parentBucketInfo.getObjectID());
+    } else {
+      throw new IOException("ParentKeyInfo for " +
+          "NSSummaryTaskWithLegacy is null");
     }
   }
+
+  /**
+   * Check if the bucket layout is LEGACY.
+   * @param metadataManager Recon OM metadata manager used to look up the bucket
+   * @param keyInfo the key whose bucket layout is checked
+   * @return true if the bucket layout is LEGACY, false otherwise
+   */
+  private boolean isBucketLayoutValid(ReconOMMetadataManager metadataManager,
+                                      OmKeyInfo keyInfo)
+      throws IOException {
+    String volumeName = keyInfo.getVolumeName();
+    String bucketName = keyInfo.getBucketName();
+    String bucketDBKey = metadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+        metadataManager.getBucketTable().getSkipCache(bucketDBKey);
+
+    if (omBucketInfo.getBucketLayout() != LEGACY_BUCKET_LAYOUT) {
+      LOG.debug(
+          "Skipping processing for bucket {} as bucket layout is not LEGACY",
+          bucketName);
+      return false;
+    }
+
+    return true;
+  }
+
 }
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
index b1aecc9a4f..a9ed342faa 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
  */
 public final class OMMetadataManagerTestUtils {
 
+  private static OzoneConfiguration configuration;
   private OMMetadataManagerTestUtils() {
   }
 
@@ -129,8 +130,9 @@ public final class OMMetadataManagerTestUtils {
     DBCheckpoint checkpoint = omMetadataManager.getStore()
         .getCheckpoint(true);
     assertNotNull(checkpoint.getCheckpointLocation());
-
-    OzoneConfiguration configuration = new OzoneConfiguration();
+    if (configuration == null) {
+      configuration = new OzoneConfiguration();
+    }
     configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir
         .getAbsolutePath());
 
@@ -501,4 +503,14 @@ public final class OMMetadataManagerTestUtils {
   public static BucketLayout getBucketLayout() {
     return BucketLayout.DEFAULT;
   }
+
+  public static OzoneConfiguration getConfiguration() {
+    return configuration;
+  }
+
+  public static void setConfiguration(
+      OzoneConfiguration configuration) {
+    OMMetadataManagerTestUtils.configuration = configuration;
+  }
+
 }
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
index ba00f843f4..765399f71e 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
@@ -74,15 +74,17 @@ import java.util.ArrayList;
 import java.util.Set;
 import java.util.HashSet;
 
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.setConfiguration;
+
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static 
org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
-import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm;
-import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider;
-import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -875,6 +877,7 @@ public class TestNSSummaryEndpointWithLegacy {
         omDbDir.getAbsolutePath());
     omConfiguration.set(OMConfigKeys
         .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true");
+    setConfiguration(omConfiguration);
     OMMetadataManager omMetadataManager = new OmMetadataManagerImpl(
         omConfiguration, null);
 
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
similarity index 71%
rename from 
hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java
rename to 
hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
index ac8dee5f09..8d8299aefc 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
@@ -33,6 +33,7 @@ import 
org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -62,6 +63,7 @@ import 
org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
 import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
 import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import 
org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
+import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithLegacy;
 import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -91,28 +93,37 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 /**
- * Test for NSSummary REST APIs with OBS.
- * Testing is done on a simple object store model with a flat hierarchy:
- * Testing the following case.
- *  ├── vol
- *  │   ├── bucket1
- *  │   │   ├── file1
- *  │   │   └── file2
- *  │   │   └── file3
- *  │   └── bucket2
- *  │       ├── file4
- *  │       └── file5
- *  └── vol2
- *      ├── bucket3
- *      │   ├── file8
- *      │   ├── file9
- *      │   └── file10
- *      └── bucket4
- *          └── file11
- * This tests the Rest APIs for NSSummary in the context of OBS buckets,
- * focusing on disk usage, quota usage, and file size distribution.
+ * Tests the NSSummary REST APIs within the context of an Object Store (OBS) 
layout,
+ * as well as Legacy layout buckets with FileSystemPaths disabled. The tests 
aim to
+ * validate API responses for buckets that follow the flat hierarchy model 
typical
+ * of OBS layouts.
+ * <p>
+ * The test environment simulates a simple object storage structure with 
volumes
+ * containing buckets, which in turn contain files. Specifically, it includes:
+ * - Two OBS layout buckets (bucket1 and bucket2) under 'vol', each containing
+ * multiple files.
+ * - Two Legacy layout buckets (bucket3 and bucket4) under 'vol2', with the
+ * fileSystemEnabled flag set to false for these legacy buckets.
+ * <p>
+ * The directory structure for testing is as follows:
+ * .
+ * └── vol
+ *     ├── bucket1 (OBS)
+ *     │   ├── file1
+ *     │   ├── file2
+ *     │   └── file3
+ *     └── bucket2 (OBS)
+ *         ├── file4
+ *         └── file5
+ * └── vol2
+ *     ├── bucket3 (Legacy)
+ *     │   ├── file8
+ *     │   ├── file9
+ *     │   └── file10
+ *     └── bucket4 (Legacy)
+ *         └── file11
  */
-public class TestNSSummaryEndpointWithOBS {
+public class TestNSSummaryEndpointWithOBSAndLegacy {
   @TempDir
   private Path temporaryFolder;
 
@@ -136,14 +147,14 @@ public class TestNSSummaryEndpointWithOBS {
   private static final String BUCKET_THREE = "bucket3";
   private static final String BUCKET_FOUR = "bucket4";
   private static final String KEY_ONE = "file1";
-  private static final String KEY_TWO = "file2";
-  private static final String KEY_THREE = "file3";
+  private static final String KEY_TWO = "////file2";
+  private static final String KEY_THREE = "file3///";
   private static final String KEY_FOUR = "file4";
-  private static final String KEY_FIVE = "file5";
+  private static final String KEY_FIVE = "_//////";
   private static final String KEY_EIGHT = "file8";
-  private static final String KEY_NINE = "file9";
-  private static final String KEY_TEN = "file10";
-  private static final String KEY_ELEVEN = "file11";
+  private static final String KEY_NINE = "//////";
+  private static final String KEY_TEN = "///__file10";
+  private static final String KEY_ELEVEN = "////file11";
   private static final String MULTI_BLOCK_FILE = KEY_THREE;
 
   private static final long PARENT_OBJECT_ID_ZERO = 0L;
@@ -256,6 +267,13 @@ public class TestNSSummaryEndpointWithOBS {
       + FILE2_SIZE_WITH_REPLICA
       + FILE3_SIZE_WITH_REPLICA;
 
+  private static final long
+      MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET3
+      = FILE8_SIZE_WITH_REPLICA +
+      FILE9_SIZE_WITH_REPLICA +
+      FILE10_SIZE_WITH_REPLICA;
+
+
   private static final long
       MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY
       = FILE4_SIZE_WITH_REPLICA;
@@ -278,7 +296,29 @@ public class TestNSSummaryEndpointWithOBS {
       ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE;
   private static final String BUCKET_TWO_PATH =
       ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO;
-  private static final String KEY_PATH =
+  private static final String BUCKET_THREE_PATH =
+      ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE;
+  private static final String BUCKET_FOUR_PATH =
+      ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR;
+  private static final String KEY_ONE_PATH =
+      ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_ONE;
+  private static final String KEY_TWO_PATH =
+      ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_TWO;
+  private static final String KEY_THREE_PATH =
+      ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE;
+  private static final String KEY_FOUR_PATH =
+      ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR;
+  private static final String KEY_FIVE_PATH =
+      ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FIVE;
+  private static final String KEY_EIGHT_PATH =
+      ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_EIGHT;
+  private static final String KEY_NINE_PATH =
+      ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_NINE;
+  private static final String KEY_TEN_PATH =
+      ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_TEN;
+  private static final String KEY_ELEVEN_PATH =
+      ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR + ROOT_PATH + KEY_ELEVEN;
+  private static final String KEY4_PATH =
       ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR;
   private static final String MULTI_BLOCK_KEY_PATH =
       ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE;
@@ -302,10 +342,17 @@ public class TestNSSummaryEndpointWithOBS {
   private static final long BUCKET_TWO_DATA_SIZE =
       FILE_FOUR_SIZE + FILE_FIVE_SIZE;
 
+  private static final long BUCKET_THREE_DATA_SIZE =
+      FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE;
+
+  private static final long BUCKET_FOUR_DATA_SIZE = FILE_ELEVEN_SIZE;
+
 
   @BeforeEach
   public void setUp() throws Exception {
     conf = new OzoneConfiguration();
+    // By setting this config our Legacy buckets will behave like OBS buckets.
+    conf.set(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, "false");
     OMMetadataManager omMetadataManager = initializeNewOmMetadataManager(
         Files.createDirectory(temporaryFolder.resolve(
             "JunitOmDBDir")).toFile(), conf);
@@ -337,6 +384,10 @@ public class TestNSSummaryEndpointWithOBS {
         new NSSummaryTaskWithOBS(reconNamespaceSummaryManager,
             reconOMMetadataManager, conf);
     nsSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager);
+    NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy =
+        new NSSummaryTaskWithLegacy(reconNamespaceSummaryManager,
+            reconOMMetadataManager, conf);
+    nsSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager);
     commonUtils = new CommonUtils();
   }
 
@@ -381,6 +432,26 @@ public class TestNSSummaryEndpointWithOBS {
     assertEquals(-1, volResponseObj.getObjectDBInfo().getQuotaInNamespace());
   }
 
+  @Test
+  public void testGetBasicInfoVolTwo() throws Exception {
+    // Test volume 2's basics
+    Response volTwoResponse = nsSummaryEndpoint.getBasicInfo(VOL_TWO_PATH);
+    NamespaceSummaryResponse volTwoResponseObj =
+        (NamespaceSummaryResponse) volTwoResponse.getEntity();
+    assertEquals(EntityType.VOLUME,
+        volTwoResponseObj.getEntityType());
+    assertEquals(2, volTwoResponseObj.getCountStats().getNumBucket());
+    assertEquals(4, volTwoResponseObj.getCountStats().getNumTotalKey());
+    assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj.
+        getObjectDBInfo()).getAdmin());
+    assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj.
+        getObjectDBInfo()).getOwner());
+    assertEquals(VOL_TWO, volTwoResponseObj.getObjectDBInfo().getName());
+    assertEquals(2097152,
+        volTwoResponseObj.getObjectDBInfo().getQuotaInBytes());
+    assertEquals(-1, 
volTwoResponseObj.getObjectDBInfo().getQuotaInNamespace());
+  }
+
   @Test
   public void testGetBasicInfoBucketOne() throws Exception {
     // Test bucket 1's basics
@@ -395,7 +466,7 @@ public class TestNSSummaryEndpointWithOBS {
     assertEquals(StorageType.DISK,
         ((BucketObjectDBInfo)
             bucketOneObj.getObjectDBInfo()).getStorageType());
-    assertEquals(getBucketLayout(),
+    assertEquals(getOBSBucketLayout(),
         ((BucketObjectDBInfo)
             bucketOneObj.getObjectDBInfo()).getBucketLayout());
     assertEquals(BUCKET_ONE,
@@ -405,9 +476,64 @@ public class TestNSSummaryEndpointWithOBS {
   @Test
   public void testGetBasicInfoBucketTwo() throws Exception {
     // Test bucket 2's basics
-    commonUtils.testNSSummaryBasicInfoBucketTwo(
-        BucketLayout.OBJECT_STORE,
-        nsSummaryEndpoint);
+    Response bucketTwoResponse =
+        nsSummaryEndpoint.getBasicInfo(BUCKET_TWO_PATH);
+    NamespaceSummaryResponse bucketTwoObj =
+        (NamespaceSummaryResponse) bucketTwoResponse.getEntity();
+    assertEquals(EntityType.BUCKET, bucketTwoObj.getEntityType());
+    assertEquals(2, bucketTwoObj.getCountStats().getNumTotalKey());
+    assertEquals(VOL,
+        ((BucketObjectDBInfo) bucketTwoObj.getObjectDBInfo()).getVolumeName());
+    assertEquals(StorageType.DISK,
+        ((BucketObjectDBInfo)
+            bucketTwoObj.getObjectDBInfo()).getStorageType());
+    assertEquals(getOBSBucketLayout(),
+        ((BucketObjectDBInfo)
+            bucketTwoObj.getObjectDBInfo()).getBucketLayout());
+    assertEquals(BUCKET_TWO,
+        ((BucketObjectDBInfo) bucketTwoObj.getObjectDBInfo()).getName());
+  }
+
+  @Test
+  public void testGetBasicInfoBucketThree() throws Exception {
+    // Test bucket 3's basics
+    Response bucketThreeResponse =
+        nsSummaryEndpoint.getBasicInfo(BUCKET_THREE_PATH);
+    NamespaceSummaryResponse bucketThreeObj = (NamespaceSummaryResponse)
+        bucketThreeResponse.getEntity();
+    assertEquals(EntityType.BUCKET, bucketThreeObj.getEntityType());
+    assertEquals(3, bucketThreeObj.getCountStats().getNumTotalKey());
+    assertEquals(VOL_TWO,
+        ((BucketObjectDBInfo) 
bucketThreeObj.getObjectDBInfo()).getVolumeName());
+    assertEquals(StorageType.DISK,
+        ((BucketObjectDBInfo)
+            bucketThreeObj.getObjectDBInfo()).getStorageType());
+    assertEquals(getLegacyBucketLayout(),
+        ((BucketObjectDBInfo)
+            bucketThreeObj.getObjectDBInfo()).getBucketLayout());
+    assertEquals(BUCKET_THREE,
+        ((BucketObjectDBInfo) bucketThreeObj.getObjectDBInfo()).getName());
+  }
+
+  @Test
+  public void testGetBasicInfoBucketFour() throws Exception {
+    // Test bucket 4's basics
+    Response bucketFourResponse =
+        nsSummaryEndpoint.getBasicInfo(BUCKET_FOUR_PATH);
+    NamespaceSummaryResponse bucketFourObj =
+        (NamespaceSummaryResponse) bucketFourResponse.getEntity();
+    assertEquals(EntityType.BUCKET, bucketFourObj.getEntityType());
+    assertEquals(1, bucketFourObj.getCountStats().getNumTotalKey());
+    assertEquals(VOL_TWO,
+        ((BucketObjectDBInfo) 
bucketFourObj.getObjectDBInfo()).getVolumeName());
+    assertEquals(StorageType.DISK,
+        ((BucketObjectDBInfo)
+            bucketFourObj.getObjectDBInfo()).getStorageType());
+    assertEquals(getLegacyBucketLayout(),
+        ((BucketObjectDBInfo)
+            bucketFourObj.getObjectDBInfo()).getBucketLayout());
+    assertEquals(BUCKET_FOUR,
+        ((BucketObjectDBInfo) bucketFourObj.getObjectDBInfo()).getName());
   }
 
   @Test
@@ -461,24 +587,135 @@ public class TestNSSummaryEndpointWithOBS {
   }
 
   @Test
-  public void testDiskUsageBucket() throws Exception {
+  public void testDiskUsageVolTwo() throws Exception {
+    // volume level DU
+    Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_TWO_PATH,
+        false, false);
+    DUResponse duVolRes = (DUResponse) volResponse.getEntity();
+    assertEquals(2, duVolRes.getCount());
+    List<DUResponse.DiskUsage> duData = duVolRes.getDuData();
+    // sort based on subpath
+    Collections.sort(duData,
+        Comparator.comparing(DUResponse.DiskUsage::getSubpath));
+    DUResponse.DiskUsage duBucket3 = duData.get(0);
+    DUResponse.DiskUsage duBucket4 = duData.get(1);
+    assertEquals(BUCKET_THREE_PATH, duBucket3.getSubpath());
+    assertEquals(BUCKET_FOUR_PATH, duBucket4.getSubpath());
+    assertEquals(VOL_TWO_DATA_SIZE, duVolRes.getSize());
+  }
+
+  @Test
+  public void testDiskUsageBucketOne() throws Exception {
     // bucket level DU
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH,
         false, false);
     DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity();
     // There are no sub-paths under this OBS bucket.
     assertEquals(0, duBucketResponse.getCount());
+
+    Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage(
+        BUCKET_ONE_PATH, true, false);
+    DUResponse duBucketResponseWithFiles =
+        (DUResponse) bucketResponseWithSubpath.getEntity();
+    assertEquals(3, duBucketResponseWithFiles.getCount());
+
     assertEquals(BUCKET_ONE_DATA_SIZE, duBucketResponse.getSize());
   }
 
   @Test
-  public void testDiskUsageKey() throws Exception {
+  public void testDiskUsageBucketTwo() throws Exception {
+    // bucket level DU
+    Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_TWO_PATH,
+        false, false);
+    DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity();
+    // There are no sub-paths under this OBS bucket.
+    assertEquals(0, duBucketResponse.getCount());
+
+    Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage(
+        BUCKET_TWO_PATH, true, false);
+    DUResponse duBucketResponseWithFiles =
+        (DUResponse) bucketResponseWithSubpath.getEntity();
+    assertEquals(2, duBucketResponseWithFiles.getCount());
+
+    assertEquals(BUCKET_TWO_DATA_SIZE, duBucketResponse.getSize());
+  }
+
+  @Test
+  public void testDiskUsageBucketThree() throws Exception {
+    // bucket level DU
+    Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_THREE_PATH,
+        false, false);
+    DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity();
+    // There are no sub-paths under this Legacy bucket.
+    assertEquals(0, duBucketResponse.getCount());
+
+    Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage(
+        BUCKET_THREE_PATH, true, false);
+    DUResponse duBucketResponseWithFiles =
+        (DUResponse) bucketResponseWithSubpath.getEntity();
+    assertEquals(3, duBucketResponseWithFiles.getCount());
+
+    assertEquals(BUCKET_THREE_DATA_SIZE, duBucketResponse.getSize());
+  }
+
+  @Test
+  public void testDiskUsageKey1() throws Exception {
+    // key level DU
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_ONE_PATH,
+        false, false);
+    DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
+    assertEquals(0, duKeyResponse.getCount());
+    assertEquals(FILE_ONE_SIZE, duKeyResponse.getSize());
+  }
+
+  @Test
+  public void testDiskUsageKey2() throws Exception {
     // key level DU
-    Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH,
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_TWO_PATH,
         false, false);
-    DUResponse keyObj = (DUResponse) keyResponse.getEntity();
-    assertEquals(0, keyObj.getCount());
-    assertEquals(FILE_FOUR_SIZE, keyObj.getSize());
+    DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
+    assertEquals(0, duKeyResponse.getCount());
+    assertEquals(FILE_TWO_SIZE, duKeyResponse.getSize());
+  }
+
+  @Test
+  public void testDiskUsageKey4() throws Exception {
+    // key level DU
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH,
+        true, false);
+    DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
+    assertEquals(0, duKeyResponse.getCount());
+    assertEquals(FILE_FOUR_SIZE, duKeyResponse.getSize());
+  }
+
+  @Test
+  public void testDiskUsageKey5() throws Exception {
+    // key level DU
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_FIVE_PATH,
+        false, false);
+    DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
+    assertEquals(0, duKeyResponse.getCount());
+    assertEquals(FILE_FIVE_SIZE, duKeyResponse.getSize());
+  }
+
+  @Test
+  public void testDiskUsageKey8() throws Exception {
+    // key level DU
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_EIGHT_PATH,
+        false, false);
+    DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
+    assertEquals(0, duKeyResponse.getCount());
+    assertEquals(FILE_EIGHT_SIZE, duKeyResponse.getSize());
+  }
+
+  @Test
+  public void testDiskUsageKey11() throws Exception {
+    // key level DU
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_ELEVEN_PATH,
+        false, false);
+    DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
+    assertEquals(0, duKeyResponse.getCount());
+    assertEquals(FILE_ELEVEN_SIZE, duKeyResponse.getSize());
   }
 
   @Test
@@ -531,7 +768,7 @@ public class TestNSSummaryEndpointWithOBS {
   }
 
   @Test
-  public void testDataSizeUnderBucketWithReplication() throws IOException {
+  public void testDataSizeUnderBucketOneWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH,
         false, true);
@@ -541,10 +778,21 @@ public class TestNSSummaryEndpointWithOBS {
         replicaDUResponse.getSizeWithReplica());
   }
 
+  @Test
+  public void testDataSizeUnderBucketThreeWithReplication() throws IOException 
{
+    setUpMultiBlockReplicatedKeys();
+    Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_THREE_PATH,
+        false, true);
+    DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity();
+    assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
+    assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET3,
+        replicaDUResponse.getSizeWithReplica());
+  }
+
   @Test
   public void testDataSizeUnderKeyWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
-    Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH,
+    Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH,
         false, true);
     DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
@@ -579,8 +827,20 @@ public class TestNSSummaryEndpointWithOBS {
     assertEquals(BUCKET_TWO_QUOTA, quBucketRes2.getQuota());
     assertEquals(BUCKET_TWO_DATA_SIZE, quBucketRes2.getQuotaUsed());
 
+    Response bucketRes3 = nsSummaryEndpoint.getQuotaUsage(BUCKET_THREE_PATH);
+    QuotaUsageResponse quBucketRes3 =
+        (QuotaUsageResponse) bucketRes3.getEntity();
+    assertEquals(BUCKET_THREE_QUOTA, quBucketRes3.getQuota());
+    assertEquals(BUCKET_THREE_DATA_SIZE, quBucketRes3.getQuotaUsed());
+
+    Response bucketRes4 = nsSummaryEndpoint.getQuotaUsage(BUCKET_FOUR_PATH);
+    QuotaUsageResponse quBucketRes4 =
+        (QuotaUsageResponse) bucketRes4.getEntity();
+    assertEquals(BUCKET_FOUR_QUOTA, quBucketRes4.getQuota());
+    assertEquals(BUCKET_FOUR_DATA_SIZE, quBucketRes4.getQuotaUsed());
+
     // other level not applicable
-    Response naResponse2 = nsSummaryEndpoint.getQuotaUsage(KEY_PATH);
+    Response naResponse2 = nsSummaryEndpoint.getQuotaUsage(KEY4_PATH);
     QuotaUsageResponse quotaUsageResponse2 =
         (QuotaUsageResponse) naResponse2.getEntity();
     assertEquals(ResponseStatus.TYPE_NOT_APPLICABLE,
@@ -617,26 +877,55 @@ public class TestNSSummaryEndpointWithOBS {
     }
   }
 
+  @Test
+  public void testNormalizePathUptoBucket() {
+    // Test null or empty path
+    assertEquals("/", OmUtils.normalizePathUptoBucket(null));
+    assertEquals("/", OmUtils.normalizePathUptoBucket(""));
+
+    // Test path with leading slashes
+    assertEquals("volume1/bucket1/key1/key2",
+        OmUtils.normalizePathUptoBucket("///volume1/bucket1/key1/key2"));
+
+    // Test volume and bucket names
+    assertEquals("volume1/bucket1",
+        OmUtils.normalizePathUptoBucket("volume1/bucket1"));
+
+    // Test with additional segments
+    assertEquals("volume1/bucket1/key1/key2",
+        OmUtils.normalizePathUptoBucket("volume1/bucket1/key1/key2"));
+
+    // Test path with multiple slashes in key names.
+    assertEquals("volume1/bucket1/key1//key2",
+        OmUtils.normalizePathUptoBucket("volume1/bucket1/key1//key2"));
+
+    // Test path with volume, bucket, and special characters in keys
+    assertEquals("volume/bucket/key$%#1/./////////key$%#2",
+        
OmUtils.normalizePathUptoBucket("volume/bucket/key$%#1/./////////key$%#2"));
+  }
+
+
   /**
    * Testing the following case.
-   * ├── vol
-   * │   ├── bucket1
-   * │   │   ├── file1
-   * │   │   └── file2
-   * │   │   └── file3
-   * │   └── bucket2
-   * │       ├── file4
-   * │       └── file5
+   * └── vol
+   *     ├── bucket1 (OBS)
+   *     │   ├── file1
+   *     │   ├── file2
+   *     │   └── file3
+   *     └── bucket2 (OBS)
+   *         ├── file4
+   *         └── file5
    * └── vol2
-   *     ├── bucket3
+   *     ├── bucket3 (Legacy)
    *     │   ├── file8
    *     │   ├── file9
    *     │   └── file10
-   *     └── bucket4
+   *     └── bucket4 (Legacy)
    *         └── file11
    *
    * Write these keys to OM and
    * replicate them.
+   * @throws Exception
    */
   @SuppressWarnings("checkstyle:MethodLength")
   private void populateOMDB() throws Exception {
@@ -652,7 +941,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_ONE_OBJECT_ID,
         VOL_OBJECT_ID,
         FILE_ONE_SIZE,
-        getBucketLayout());
+        getOBSBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
         KEY_TWO,
         BUCKET_ONE,
@@ -663,7 +952,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_ONE_OBJECT_ID,
         VOL_OBJECT_ID,
         FILE_TWO_SIZE,
-        getBucketLayout());
+        getOBSBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
         KEY_THREE,
         BUCKET_ONE,
@@ -674,7 +963,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_ONE_OBJECT_ID,
         VOL_OBJECT_ID,
         FILE_THREE_SIZE,
-        getBucketLayout());
+        getOBSBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
         KEY_FOUR,
         BUCKET_TWO,
@@ -685,7 +974,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_TWO_OBJECT_ID,
         VOL_OBJECT_ID,
         FILE_FOUR_SIZE,
-        getBucketLayout());
+        getOBSBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
         KEY_FIVE,
         BUCKET_TWO,
@@ -696,7 +985,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_TWO_OBJECT_ID,
         VOL_OBJECT_ID,
         FILE_FIVE_SIZE,
-        getBucketLayout());
+        getOBSBucketLayout());
 
     writeKeyToOm(reconOMMetadataManager,
         KEY_EIGHT,
@@ -708,7 +997,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_THREE_OBJECT_ID,
         VOL_TWO_OBJECT_ID,
         FILE_EIGHT_SIZE,
-        getBucketLayout());
+        getLegacyBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
         KEY_NINE,
         BUCKET_THREE,
@@ -719,7 +1008,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_THREE_OBJECT_ID,
         VOL_TWO_OBJECT_ID,
         FILE_NINE_SIZE,
-        getBucketLayout());
+        getLegacyBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
         KEY_TEN,
         BUCKET_THREE,
@@ -730,7 +1019,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_THREE_OBJECT_ID,
         VOL_TWO_OBJECT_ID,
         FILE_TEN_SIZE,
-        getBucketLayout());
+        getLegacyBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
         KEY_ELEVEN,
         BUCKET_FOUR,
@@ -741,7 +1030,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_FOUR_OBJECT_ID,
         VOL_TWO_OBJECT_ID,
         FILE_ELEVEN_SIZE,
-        getBucketLayout());
+        getLegacyBucketLayout());
   }
 
   /**
@@ -756,7 +1045,7 @@ public class TestNSSummaryEndpointWithOBS {
     omConfiguration.set(OZONE_OM_DB_DIRS,
         omDbDir.getAbsolutePath());
     omConfiguration.set(OMConfigKeys
-        .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true");
+        .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "false");
     OMMetadataManager omMetadataManager = new OmMetadataManagerImpl(
         omConfiguration, null);
 
@@ -788,7 +1077,7 @@ public class TestNSSummaryEndpointWithOBS {
         .setBucketName(BUCKET_ONE)
         .setObjectID(BUCKET_ONE_OBJECT_ID)
         .setQuotaInBytes(BUCKET_ONE_QUOTA)
-        .setBucketLayout(getBucketLayout())
+        .setBucketLayout(getOBSBucketLayout())
         .build();
 
     OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder()
@@ -796,7 +1085,7 @@ public class TestNSSummaryEndpointWithOBS {
         .setBucketName(BUCKET_TWO)
         .setObjectID(BUCKET_TWO_OBJECT_ID)
         .setQuotaInBytes(BUCKET_TWO_QUOTA)
-        .setBucketLayout(getBucketLayout())
+        .setBucketLayout(getOBSBucketLayout())
         .build();
 
     OmBucketInfo bucketInfo3 = OmBucketInfo.newBuilder()
@@ -804,7 +1093,7 @@ public class TestNSSummaryEndpointWithOBS {
         .setBucketName(BUCKET_THREE)
         .setObjectID(BUCKET_THREE_OBJECT_ID)
         .setQuotaInBytes(BUCKET_THREE_QUOTA)
-        .setBucketLayout(getBucketLayout())
+        .setBucketLayout(getLegacyBucketLayout())
         .build();
 
     OmBucketInfo bucketInfo4 = OmBucketInfo.newBuilder()
@@ -812,7 +1101,7 @@ public class TestNSSummaryEndpointWithOBS {
         .setBucketName(BUCKET_FOUR)
         .setObjectID(BUCKET_FOUR_OBJECT_ID)
         .setQuotaInBytes(BUCKET_FOUR_QUOTA)
-        .setBucketLayout(getBucketLayout())
+        .setBucketLayout(getLegacyBucketLayout())
         .build();
 
     String bucketKey = omMetadataManager.getBucketKey(
@@ -847,7 +1136,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_ONE_OBJECT_ID,
         VOL_OBJECT_ID,
         Collections.singletonList(locationInfoGroup),
-        getBucketLayout(),
+        getOBSBucketLayout(),
         FILE_THREE_SIZE);
   }
 
@@ -920,7 +1209,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_ONE_OBJECT_ID,
         VOL_OBJECT_ID,
         Collections.singletonList(locationInfoGroup1),
-        getBucketLayout(),
+        getOBSBucketLayout(),
         FILE_ONE_SIZE);
 
     //vol/bucket1/file2
@@ -934,7 +1223,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_ONE_OBJECT_ID,
         VOL_OBJECT_ID,
         Collections.singletonList(locationInfoGroup2),
-        getBucketLayout(),
+        getOBSBucketLayout(),
         FILE_TWO_SIZE);
 
     //vol/bucket1/file3
@@ -948,7 +1237,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_ONE_OBJECT_ID,
         VOL_OBJECT_ID,
         Collections.singletonList(locationInfoGroup1),
-        getBucketLayout(),
+        getOBSBucketLayout(),
         FILE_THREE_SIZE);
 
     //vol/bucket2/file4
@@ -962,7 +1251,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_TWO_OBJECT_ID,
         VOL_OBJECT_ID,
         Collections.singletonList(locationInfoGroup2),
-        getBucketLayout(),
+        getOBSBucketLayout(),
         FILE_FOUR_SIZE);
 
     //vol/bucket2/file5
@@ -976,7 +1265,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_TWO_OBJECT_ID,
         VOL_OBJECT_ID,
         Collections.singletonList(locationInfoGroup1),
-        getBucketLayout(),
+        getOBSBucketLayout(),
         FILE_FIVE_SIZE);
 
     //vol2/bucket3/file8
@@ -990,7 +1279,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_THREE_OBJECT_ID,
         VOL_TWO_OBJECT_ID,
         Collections.singletonList(locationInfoGroup2),
-        getBucketLayout(),
+        getLegacyBucketLayout(),
         FILE_EIGHT_SIZE);
 
     //vol2/bucket3/file9
@@ -1004,7 +1293,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_THREE_OBJECT_ID,
         VOL_TWO_OBJECT_ID,
         Collections.singletonList(locationInfoGroup1),
-        getBucketLayout(),
+        getLegacyBucketLayout(),
         FILE_NINE_SIZE);
 
     //vol2/bucket3/file10
@@ -1018,7 +1307,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_THREE_OBJECT_ID,
         VOL_TWO_OBJECT_ID,
         Collections.singletonList(locationInfoGroup2),
-        getBucketLayout(),
+        getLegacyBucketLayout(),
         FILE_TEN_SIZE);
 
     //vol2/bucket4/file11
@@ -1032,7 +1321,7 @@ public class TestNSSummaryEndpointWithOBS {
         BUCKET_FOUR_OBJECT_ID,
         VOL_TWO_OBJECT_ID,
         Collections.singletonList(locationInfoGroup1),
-        getBucketLayout(),
+        getLegacyBucketLayout(),
         FILE_ELEVEN_SIZE);
   }
 
@@ -1115,10 +1404,14 @@ public class TestNSSummaryEndpointWithOBS {
     return reconSCM;
   }
 
-  private static BucketLayout getBucketLayout() {
+  private static BucketLayout getOBSBucketLayout() {
     return BucketLayout.OBJECT_STORE;
   }
 
+  private static BucketLayout getLegacyBucketLayout() {
+    return BucketLayout.LEGACY;
+  }
+
   private static SCMNodeStat getMockSCMRootStat() {
     return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE,
         ROOT_QUOTA - ROOT_DATA_SIZE, 0L, 0L);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java
new file mode 100644
index 0000000000..db48036763
--- /dev/null
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java
@@ -0,0 +1,554 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.tasks;
+
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.recon.ReconConstants;
+import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.api.types.NSSummary;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
+import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Nested;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
+import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
+import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO;
+import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+/**
+ * Test for NSSummaryTaskWithLegacy focusing on the OBS (Object Store) layout.
+ */
+public final class TestNSSummaryTaskWithLegacyOBSLayout {
+
+  private static ReconNamespaceSummaryManager reconNamespaceSummaryManager;
+  private static ReconOMMetadataManager reconOMMetadataManager;
+  private static OzoneConfiguration ozoneConfiguration;
+  private static NSSummaryTaskWithLegacy nSSummaryTaskWithLegacy;
+
+  private static OMMetadataManager omMetadataManager;
+  private static OzoneConfiguration omConfiguration;
+
+  // Object names
+  private static final String VOL = "vol";
+  private static final String BUCKET_ONE = "bucket1";
+  private static final String BUCKET_TWO = "bucket2";
+  private static final String KEY_ONE = "key1";
+  private static final String KEY_TWO = "key2";
+  private static final String KEY_THREE = "dir1/dir2/key3";
+  private static final String KEY_FOUR = "key4///////////";
+  private static final String KEY_FIVE = "//////////";
+  private static final String KEY_SIX = "key6";
+  private static final String KEY_SEVEN = "/////key7";
+
+  private static final String TEST_USER = "TestUser";
+
+  private static final long PARENT_OBJECT_ID_ZERO = 0L;
+  private static final long VOL_OBJECT_ID = 0L;
+  private static final long BUCKET_ONE_OBJECT_ID = 1L;
+  private static final long BUCKET_TWO_OBJECT_ID = 2L;
+  private static final long KEY_ONE_OBJECT_ID = 3L;
+  private static final long KEY_TWO_OBJECT_ID = 5L;
+  private static final long KEY_FOUR_OBJECT_ID = 6L;
+  private static final long KEY_THREE_OBJECT_ID = 8L;
+  private static final long KEY_FIVE_OBJECT_ID = 9L;
+  private static final long KEY_SIX_OBJECT_ID = 10L;
+  private static final long KEY_SEVEN_OBJECT_ID = 11L;
+
+
+  private static final long KEY_ONE_SIZE = 500L;
+  private static final long KEY_TWO_OLD_SIZE = 1025L;
+  private static final long KEY_TWO_UPDATE_SIZE = 1023L;
+  private static final long KEY_THREE_SIZE =
+      ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L;
+  private static final long KEY_FOUR_SIZE = 2050L;
+  private static final long KEY_FIVE_SIZE = 100L;
+  private static final long KEY_SIX_SIZE = 6000L;
+  private static final long KEY_SEVEN_SIZE = 7000L;
+
+  private TestNSSummaryTaskWithLegacyOBSLayout() {
+  }
+
+  @BeforeAll
+  public static void setUp(@TempDir File tmpDir) throws Exception {
+    initializeNewOmMetadataManager(new File(tmpDir, "om"));
+    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
+        getMockOzoneManagerServiceProviderWithFSO();
+    reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager,
+        new File(tmpDir, "recon"));
+    ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+        false);
+
+    ReconTestInjector reconTestInjector =
+        new ReconTestInjector.Builder(tmpDir)
+            .withReconOm(reconOMMetadataManager)
+            .withOmServiceProvider(ozoneManagerServiceProvider)
+            .withReconSqlDb()
+            .withContainerDB()
+            .build();
+    reconNamespaceSummaryManager =
+        reconTestInjector.getInstance(ReconNamespaceSummaryManager.class);
+
+    NSSummary nonExistentSummary =
+        reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
+    assertNull(nonExistentSummary);
+
+    populateOMDB();
+
+    nSSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy(
+        reconNamespaceSummaryManager,
+        reconOMMetadataManager, ozoneConfiguration);
+  }
+
+  /**
+   * Nested class for testing NSSummaryTaskWithLegacy reprocess.
+   */
+  @Nested
+  public class TestReprocess {
+
+    private NSSummary nsSummaryForBucket1;
+    private NSSummary nsSummaryForBucket2;
+
+    @BeforeEach
+    public void setUp() throws IOException {
+      // write a NSSummary prior to reprocess
+      // verify it got cleaned up after.
+      NSSummary staleNSSummary = new NSSummary();
+      RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
+      reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L,
+          staleNSSummary);
+      reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation);
+
+      // Verify commit
+      assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L));
+
+      // reinit Recon RocksDB's namespace CF.
+      reconNamespaceSummaryManager.clearNSSummaryTable();
+
+      nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager);
+      assertNull(reconNamespaceSummaryManager.getNSSummary(-1L));
+
+      nsSummaryForBucket1 =
+          reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
+      nsSummaryForBucket2 =
+          reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID);
+      assertNotNull(nsSummaryForBucket1);
+      assertNotNull(nsSummaryForBucket2);
+    }
+
+    @Test
+    public void testReprocessNSSummaryNull() throws IOException {
+      assertNull(reconNamespaceSummaryManager.getNSSummary(-1L));
+    }
+
+    @Test
+    public void testReprocessGetFiles() {
+      assertEquals(3, nsSummaryForBucket1.getNumOfFiles());
+      assertEquals(2, nsSummaryForBucket2.getNumOfFiles());
+
+      assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE,
+          nsSummaryForBucket1.getSizeOfFiles());
+      assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE,
+          nsSummaryForBucket2.getSizeOfFiles());
+    }
+
+    @Test
+    public void testReprocessFileBucketSize() {
+      int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket();
+      int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket();
+      assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS,
+          fileDistBucket1.length);
+      assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS,
+          fileDistBucket2.length);
+
+      // Check for 1's and 0's in fileDistBucket1
+      int[] expectedIndexes1 = {0, 1, 40};
+      for (int index = 0; index < fileDistBucket1.length; index++) {
+        if (contains(expectedIndexes1, index)) {
+          assertEquals(1, fileDistBucket1[index]);
+        } else {
+          assertEquals(0, fileDistBucket1[index]);
+        }
+      }
+
+      // Check for 1's and 0's in fileDistBucket2
+      int[] expectedIndexes2 = {0, 2};
+      for (int index = 0; index < fileDistBucket2.length; index++) {
+        if (contains(expectedIndexes2, index)) {
+          assertEquals(1, fileDistBucket2[index]);
+        } else {
+          assertEquals(0, fileDistBucket2[index]);
+        }
+      }
+    }
+
+  }
+
+  /**
+   * Nested class for testing NSSummaryTaskWithLegacy process.
+   */
+  @Nested
+  public class TestProcess {
+
+    private NSSummary nsSummaryForBucket1;
+    private NSSummary nsSummaryForBucket2;
+
+    private OMDBUpdateEvent keyEvent1;
+    private OMDBUpdateEvent keyEvent2;
+    private OMDBUpdateEvent keyEvent3;
+    private OMDBUpdateEvent keyEvent4;
+
+    @BeforeEach
+    public void setUp() throws IOException {
+      // reinit Recon RocksDB's namespace CF.
+      reconNamespaceSummaryManager.clearNSSummaryTable();
+      nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager);
+      nSSummaryTaskWithLegacy.processWithLegacy(processEventBatch());
+
+      nsSummaryForBucket1 =
+          reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID);
+      assertNotNull(nsSummaryForBucket1);
+      nsSummaryForBucket2 =
+          reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID);
+      assertNotNull(nsSummaryForBucket2);
+    }
+
+    private OMUpdateEventBatch processEventBatch() throws IOException {
+      // Test PUT Event.
+      // PUT Key6 in Bucket2.
+      String omPutKey =
+          OM_KEY_PREFIX + VOL
+              + OM_KEY_PREFIX + BUCKET_TWO +
+              OM_KEY_PREFIX + KEY_SIX;
+      OmKeyInfo omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_TWO, KEY_SIX,
+          KEY_SIX, KEY_SIX_OBJECT_ID, BUCKET_TWO_OBJECT_ID, KEY_SIX_SIZE);
+      keyEvent1 = new OMDBUpdateEvent.
+          OMUpdateEventBuilder<String, OmKeyInfo>()
+          .setKey(omPutKey)
+          .setValue(omPutKeyInfo)
+          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+              .getName())
+          .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
+          .build();
+      // PUT Key7 in Bucket1.
+      omPutKey =
+          OM_KEY_PREFIX + VOL
+              + OM_KEY_PREFIX + BUCKET_ONE +
+              OM_KEY_PREFIX + KEY_SEVEN;
+      omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_SEVEN,
+          KEY_SEVEN, KEY_SEVEN_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_SEVEN_SIZE);
+      keyEvent2 = new OMDBUpdateEvent.
+          OMUpdateEventBuilder<String, OmKeyInfo>()
+          .setKey(omPutKey)
+          .setValue(omPutKeyInfo)
+          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+              .getName())
+          .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
+          .build();
+
+      // Test DELETE Event.
+      // Delete Key1 in Bucket1.
+      String omDeleteKey =
+          OM_KEY_PREFIX + VOL
+              + OM_KEY_PREFIX + BUCKET_ONE +
+              OM_KEY_PREFIX + KEY_ONE;
+      OmKeyInfo omDeleteKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_ONE,
+          KEY_ONE, KEY_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_ONE_SIZE);
+      keyEvent3 = new OMDBUpdateEvent.
+          OMUpdateEventBuilder<String, OmKeyInfo>()
+          .setKey(omDeleteKey)
+          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+              .getName())
+          .setValue(omDeleteKeyInfo)
+          .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
+          .build();
+
+      // Test UPDATE Event.
+      // Resize Key2 in Bucket1.
+      String omResizeKey =
+          OM_KEY_PREFIX + VOL
+              + OM_KEY_PREFIX + BUCKET_ONE +
+              OM_KEY_PREFIX + KEY_TWO;
+      OmKeyInfo oldOmResizeKeyInfo =
+          buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID,
+              BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE);
+      OmKeyInfo newOmResizeKeyInfo =
+          buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID,
+              BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE + 100);
+      keyEvent4 = new OMDBUpdateEvent.
+          OMUpdateEventBuilder<String, OmKeyInfo>()
+          .setKey(omResizeKey)
+          .setOldValue(oldOmResizeKeyInfo)
+          .setValue(newOmResizeKeyInfo)
+          .setTable(omMetadataManager.getKeyTable(getBucketLayout())
+              .getName())
+          .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE)
+          .build();
+
+      return new OMUpdateEventBatch(
+          Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4));
+    }
+
+    @Test
+    public void testProcessForCount() throws IOException {
+      assertNotNull(nsSummaryForBucket1);
+      assertEquals(3, nsSummaryForBucket1.getNumOfFiles());
+      assertNotNull(nsSummaryForBucket2);
+      assertEquals(3, nsSummaryForBucket2.getNumOfFiles());
+
+      Set<Long> childDirBucket1 = nsSummaryForBucket1.getChildDir();
+      assertEquals(0, childDirBucket1.size());
+      Set<Long> childDirBucket2 = nsSummaryForBucket2.getChildDir();
+      assertEquals(0, childDirBucket2.size());
+    }
+
+    @Test
+    public void testProcessForSize() throws IOException {
+      assertNotNull(nsSummaryForBucket1);
+      assertEquals(
+          KEY_THREE_SIZE + KEY_SEVEN_SIZE + KEY_TWO_OLD_SIZE + 100,
+          nsSummaryForBucket1.getSizeOfFiles());
+      assertNotNull(nsSummaryForBucket2);
+      assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE,
+          nsSummaryForBucket2.getSizeOfFiles());
+    }
+
+
+    @Test
+    public void testProcessFileBucketSize() {
+      int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket();
+      int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket();
+      assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS,
+          fileDistBucket1.length);
+      assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS,
+          fileDistBucket2.length);
+
+      // Check for 1's and 0's in fileDistBucket1
+      int[] expectedIndexes1 = {1, 3, 40};
+      for (int index = 0; index < fileDistBucket1.length; index++) {
+        if (contains(expectedIndexes1, index)) {
+          assertEquals(1, fileDistBucket1[index]);
+        } else {
+          assertEquals(0, fileDistBucket1[index]);
+        }
+      }
+
+      // Check for 1's and 0's in fileDistBucket2
+      int[] expectedIndexes2 = {0, 2, 3};
+      for (int index = 0; index < fileDistBucket2.length; index++) {
+        if (contains(expectedIndexes2, index)) {
+          assertEquals(1, fileDistBucket2[index]);
+        } else {
+          assertEquals(0, fileDistBucket2[index]);
+        }
+      }
+    }
+
+  }
+
+  /**
+   * Populate OMDB with the following configs.
+   *                vol
+   *             /       \
+   *       bucket1       bucket2
+   *      /   |   \       /   \
+   *   key1 key2 key3  key4   key5
+   *
+   * @throws IOException
+   */
+  private static void populateOMDB() throws IOException {
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_ONE,
+        BUCKET_ONE,
+        VOL,
+        KEY_ONE,
+        KEY_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_ONE_SIZE,
+        getBucketLayout());
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_TWO,
+        BUCKET_ONE,
+        VOL,
+        KEY_TWO,
+        KEY_TWO_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_TWO_OLD_SIZE,
+        getBucketLayout());
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_THREE,
+        BUCKET_ONE,
+        VOL,
+        KEY_THREE,
+        KEY_THREE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_THREE_SIZE,
+        getBucketLayout());
+
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_FOUR,
+        BUCKET_TWO,
+        VOL,
+        KEY_FOUR,
+        KEY_FOUR_OBJECT_ID,
+        BUCKET_TWO_OBJECT_ID,
+        BUCKET_TWO_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_FOUR_SIZE,
+        getBucketLayout());
+    writeKeyToOm(reconOMMetadataManager,
+        KEY_FIVE,
+        BUCKET_TWO,
+        VOL,
+        KEY_FIVE,
+        KEY_FIVE_OBJECT_ID,
+        PARENT_OBJECT_ID_ZERO,
+        BUCKET_TWO_OBJECT_ID,
+        VOL_OBJECT_ID,
+        KEY_FIVE_SIZE,
+        getBucketLayout());
+  }
+
+  /**
+   * Create a new OM Metadata manager instance with one user, one vol, and two
+   * buckets.
+   *
+   * @throws IOException if the OM metadata manager or its tables cannot be initialized
+   */
+  private static void initializeNewOmMetadataManager(
+      File omDbDir)
+      throws IOException {
+    omConfiguration = new OzoneConfiguration();
+    omConfiguration.set(OZONE_OM_DB_DIRS,
+        omDbDir.getAbsolutePath());
+    omConfiguration.set(OMConfigKeys
+        .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true");
+    omMetadataManager = new OmMetadataManagerImpl(
+        omConfiguration, null);
+
+    String volumeKey = omMetadataManager.getVolumeKey(VOL);
+    OmVolumeArgs args =
+        OmVolumeArgs.newBuilder()
+            .setObjectID(VOL_OBJECT_ID)
+            .setVolume(VOL)
+            .setAdminName(TEST_USER)
+            .setOwnerName(TEST_USER)
+            .build();
+    omMetadataManager.getVolumeTable().put(volumeKey, args);
+
+    OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder()
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_ONE)
+        .setObjectID(BUCKET_ONE_OBJECT_ID)
+        .setBucketLayout(getBucketLayout())
+        .build();
+
+    OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder()
+        .setVolumeName(VOL)
+        .setBucketName(BUCKET_TWO)
+        .setObjectID(BUCKET_TWO_OBJECT_ID)
+        .setBucketLayout(getBucketLayout())
+        .build();
+
+    String bucketKey = omMetadataManager.getBucketKey(
+        bucketInfo1.getVolumeName(), bucketInfo1.getBucketName());
+    String bucketKey2 = omMetadataManager.getBucketKey(
+        bucketInfo2.getVolumeName(), bucketInfo2.getBucketName());
+
+    omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1);
+    omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2);
+  }
+
+  /**
+   * Build a key info for put/update action.
+   *
+   * @param volume         volume name
+   * @param bucket         bucket name
+   * @param key            key name
+   * @param fileName       file name
+   * @param objectID       object ID
+   * @param parentObjectId parent object ID
+   * @param dataSize       file size
+   * @return the KeyInfo
+   */
+  private static OmKeyInfo buildOmKeyInfo(String volume,
+                                          String bucket,
+                                          String key,
+                                          String fileName,
+                                          long objectID,
+                                          long parentObjectId,
+                                          long dataSize) {
+    return new OmKeyInfo.Builder()
+        .setBucketName(bucket)
+        .setVolumeName(volume)
+        .setKeyName(key)
+        .setFileName(fileName)
+        .setReplicationConfig(
+            StandaloneReplicationConfig.getInstance(
+                HddsProtos.ReplicationFactor.ONE))
+        .setObjectID(objectID)
+        .setParentObjectID(parentObjectId)
+        .setDataSize(dataSize)
+        .build();
+  }
+
+  // Helper method to check if an array contains a specific value
+  private boolean contains(int[] arr, int value) {
+    for (int num : arr) {
+      if (num == value) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private static BucketLayout getBucketLayout() {
+    return BucketLayout.LEGACY;
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to