This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 2d12496  HDFS-15480. Ordered snapshot deletion: record snapshot deletion in XAttr (#2163)
2d12496 is described below

commit 2d12496643b1b7cfa4eb270ec9b2fcdb78a58798
Author: bshashikant <shashik...@apache.org>
AuthorDate: Thu Jul 23 04:46:27 2020 +0530

    HDFS-15480. Ordered snapshot deletion: record snapshot deletion in XAttr (#2163)
---
 .../hdfs/server/common/HdfsServerConstants.java    |   1 +
 .../hdfs/server/namenode/FSDirSnapshotOp.java      |  20 ---
 .../hadoop/hdfs/server/namenode/FSDirXAttrOp.java  |  13 +-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  10 +-
 .../server/namenode/snapshot/SnapshotManager.java  |  50 +++++--
 .../namenode/TestOrderedSnapshotDeletion.java      | 157 +++++++++++++++++----
 6 files changed, 186 insertions(+), 65 deletions(-)
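
In short: with dfs.namenode.snapshot.deletion.ordered enabled, deleting a
snapshot that is not the earliest one no longer fails with a
SnapshotException. Instead the snapshot root is tagged with the new
system.hdfs.snapshot.deleted xattr and the call returns; the actual removal
is deferred, this commit only records the marker. A minimal sketch of the
client-visible behavior (the standalone cluster setup below is illustrative,
not part of this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    Configuration conf = new Configuration();
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();

    Path dir = new Path("/snapDir");
    hdfs.mkdirs(dir);
    hdfs.allowSnapshot(dir);
    hdfs.createSnapshot(dir, "s0");
    hdfs.createSnapshot(dir, "s1");

    // Before this commit: deleting s1 while s0 still exists threw a
    // SnapshotException. After it: the call succeeds, and s1 is merely
    // marked deleted via an xattr on its snapshot root.
    hdfs.deleteSnapshot(dir, "s1");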

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 78d4289..a55985e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -366,6 +366,7 @@ public interface HdfsServerConstants {
       "security.hdfs.unreadable.by.superuser";
   String XATTR_ERASURECODING_POLICY =
       "system.hdfs.erasurecoding.policy";
+  String SNAPSHOT_XATTR_NAME = "system.hdfs.snapshot.deleted";
 
   String XATTR_SATISFY_STORAGE_POLICY = "user.hdfs.sps";
 
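XAttrHelper.buildXAttr splits the "system." prefix off into the xattr
namespace, so the constant added above yields a SYSTEM-namespace attribute
named hdfs.snapshot.deleted with no value (the updated test below asserts
exactly this). A quick sketch:

    import org.apache.hadoop.fs.XAttr;
    import org.apache.hadoop.hdfs.XAttrHelper;

    XAttr marker = XAttrHelper.buildXAttr("system.hdfs.snapshot.deleted");
    // marker.getNameSpace() == XAttr.NameSpace.SYSTEM
    // marker.getName().equals("hdfs.snapshot.deleted")
    // marker.getValue() == null, the marker carries no payload
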
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
index c2eb401..923c6a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -252,25 +251,6 @@ class FSDirSnapshotOp {
 
     // time of snapshot deletion
     final long now = Time.now();
-    if (fsd.isSnapshotDeletionOrdered()) {
-      final INodeDirectory srcRoot = snapshotManager.getSnapshottableRoot(iip);
-      final DirectorySnapshottableFeature snapshottable
-          = srcRoot.getDirectorySnapshottableFeature();
-      final Snapshot snapshot = snapshottable.getSnapshotByName(
-          srcRoot, snapshotName);
-
-      // Diffs must be not empty since a snapshot exists in the list
-      final int earliest = snapshottable.getDiffs().iterator().next()
-          .getSnapshotId();
-      if (snapshot.getId() != earliest) {
-        throw new SnapshotException("Failed to delete snapshot " + snapshotName
-            + " from directory " + srcRoot.getFullPathName()
-            + ": " + snapshot + " is not the earliest snapshot id=" + earliest
-            + " (" + DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED
-            + " is " + fsd.isSnapshotDeletionOrdered() + ")");
-      }
-    }
-
     final INode.BlocksMapUpdateInfo collectedBlocks = deleteSnapshot(
         fsd, snapshotManager, iip, snapshotName, now);
     fsd.getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index ff82610..4f215ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -41,12 +41,13 @@ import java.util.EnumSet;
 import java.util.List;
 import java.util.ListIterator;
 
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SNAPSHOT_XATTR_NAME;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
 
-class FSDirXAttrOp {
+public class FSDirXAttrOp {
   private static final XAttr KEYID_XATTR =
       XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, null);
   private static final XAttr UNREADABLE_BY_SUPERUSER_XATTR =
@@ -265,7 +266,7 @@ class FSDirXAttrOp {
     return newXAttrs;
   }
 
-  static INode unprotectedSetXAttrs(
+  public static INode unprotectedSetXAttrs(
       FSDirectory fsd, final INodesInPath iip, final List<XAttr> xAttrs,
       final EnumSet<XAttrSetFlag> flag)
       throws IOException {
@@ -326,6 +327,12 @@ class FSDirXAttrOp {
         throw new IOException("Can only set '" +
             SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' on a file.");
       }
+
+      if (xaName.equals(SNAPSHOT_XATTR_NAME) && !(inode.isDirectory() &&
+          inode.getParent().isSnapshottable())) {
+        throw new IOException("Can only set '" +
+            SNAPSHOT_XATTR_NAME + "' on a snapshot root.");
+      }
     }
 
     XAttrStorage.updateINodeXAttrs(inode, newXAttrs, iip.getLatestSnapshotId());
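
The new guard above pins the marker xattr to snapshot roots, i.e. to the
directory INode returned by Snapshot#getRoot(), whose parent is the
snapshottable directory itself. Illustrative only (the paths are
hypothetical):

    // For a snapshottable directory /d with snapshot s1, the check
    //   inode.isDirectory() && inode.getParent().isSnapshottable()
    // accepts the snapshot root behind /d/.snapshot/s1 (a directory whose
    // parent /d is snapshottable) and rejects /d itself, plain
    // subdirectories such as /d/sub0, and any file INode.
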
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 6df255e..2c7932c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -359,14 +359,6 @@ public class FSDirectory implements Closeable {
             DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED_DEFAULT);
     LOG.info("{} = {}", DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED,
         snapshotDeletionOrdered);
-    if (snapshotDeletionOrdered && !xattrsEnabled) {
-      throw new HadoopIllegalArgumentException("" +
-          "XAttrs is required by snapshotDeletionOrdered:"
-          + DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED
-          + " is true but "
-          + DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY
-          + " is false.");
-    }
 
     this.accessTimePrecision = conf.getLong(
         DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
@@ -626,7 +618,7 @@ public class FSDirectory implements Closeable {
   }
   int getXattrMaxSize() { return xattrMaxSize; }
 
-  boolean isSnapshotDeletionOrdered() {
+  public boolean isSnapshotDeletionOrdered() {
     return snapshotDeletionOrdered;
   }
 
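Dropping the startup check above is consistent with the new
testSnapshotXattrWithDisablingXattr below: dfs.namenode.xattrs.enabled gates
only the client-facing xattr API, while the internal unprotectedSetXAttrs
path that records the deletion marker is not gated by it. A hedged sketch of
the distinction, assuming the flag has been set to false:

    // Client-facing path: rejected when dfs.namenode.xattrs.enabled=false.
    hdfs.getXAttrs(snapshottableDir);
    // -> IOException: "The XAttr operation has been rejected..."

    // Internal marking path taken by deleteSnapshot: not gated by the
    // flag, so ordered snapshot deletion keeps recording the marker.
    hdfs.deleteSnapshot(snapshottableDir, "s1");
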
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index 30b98b8..9ace8a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -36,10 +36,14 @@ import java.util.concurrent.atomic.AtomicInteger;
 import javax.management.ObjectName;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
@@ -47,14 +51,9 @@ import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.*;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
-import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
-import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
-import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.metrics2.util.MBeans;
 
 import com.google.common.base.Preconditions;
@@ -352,7 +351,38 @@ public class SnapshotManager implements SnapshotStatsMXBean {
    */
   public void deleteSnapshot(final INodesInPath iip, final String snapshotName,
       INode.ReclaimContext reclaimContext, long now) throws IOException {
-    INodeDirectory srcRoot = getSnapshottableRoot(iip);
+    final INodeDirectory srcRoot = getSnapshottableRoot(iip);
+    if (fsdir.isSnapshotDeletionOrdered()) {
+      final DirectorySnapshottableFeature snapshottable
+          = srcRoot.getDirectorySnapshottableFeature();
+      final Snapshot snapshot = snapshottable.getSnapshotByName(
+          srcRoot, snapshotName);
+
+      // Diffs must not be empty since a snapshot exists in the list
+      final int earliest = snapshottable.getDiffs().iterator().next()
+          .getSnapshotId();
+      if (snapshot.getId() != earliest) {
+        final XAttr snapshotXAttr = buildXAttr();
+        final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
+        xattrs.add(snapshotXAttr);
+
+        // The snapshot to be deleted is only marked for deletion in the
+        // xattr. The same snapshot delete call can happen multiple times
+        // before the first delete actually hides/removes it from the
+        // snapshot list, so XAttrSetFlag.REPLACE must be included here
+        // to keep those repeated calls from failing.
+
+        // The xattr is set on the snapshot root directory.
+        // NOTE: this method is also called directly while replaying the
+        // edit logs; during replay the snapshot must likewise be marked
+        // deleted in the xattr of the snapshot root.
+        FSDirXAttrOp.unprotectedSetXAttrs(fsdir,
+            INodesInPath.append(iip, snapshot.getRoot(),
+                DFSUtil.string2Bytes(snapshotName)), xattrs,
+            EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+        return;
+      }
+    }
     srcRoot.removeSnapshot(reclaimContext, snapshotName, now);
     numSnapshots.getAndDecrement();
   }
@@ -545,6 +575,10 @@ public class SnapshotManager implements SnapshotStatsMXBean {
     return ((1 << SNAPSHOT_ID_BIT_WIDTH) - 1);
   }
 
+  public static XAttr buildXAttr() {
+    return XAttrHelper.buildXAttr(HdfsServerConstants.SNAPSHOT_XATTR_NAME);
+  }
+
   private ObjectName mxBeanName;
 
   public void registerMXBean() {
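
The EnumSet.of(CREATE, REPLACE) pair above is what makes repeated deletes of
the same snapshot idempotent: CREATE covers the first marking and REPLACE
covers every later one; either flag alone would fail one of the two cases. A
small sketch of the flag semantics using XAttrSetFlag.validate (a hedged
illustration, not code from this commit):

    import java.util.EnumSet;
    import org.apache.hadoop.fs.XAttrSetFlag;

    EnumSet<XAttrSetFlag> flags =
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE);
    // First delete: the marker does not exist yet, CREATE applies.
    XAttrSetFlag.validate("hdfs.snapshot.deleted", false, flags);
    // Repeated delete: the marker already exists, REPLACE applies.
    XAttrSetFlag.validate("hdfs.snapshot.deleted", true, flags);
    // CREATE alone would fail the second call; REPLACE alone, the first.
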
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestOrderedSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestOrderedSnapshotDeletion.java
index c8df780..d3097ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestOrderedSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestOrderedSnapshotDeletion.java
@@ -19,32 +19,35 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.Map;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.
+    DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
-/** Test ordered snapshot deletion. */
+/**
+ * Test ordered snapshot deletion.
+ */
 public class TestOrderedSnapshotDeletion {
-  static final Logger LOG = LoggerFactory.getLogger(FSDirectory.class);
-
-  {
-    SnapshotTestHelper.disableLogs();
-    GenericTestUtils.setLogLevel(INode.LOG, Level.TRACE);
-  }
-
+  static final String xattrName = "user.a1";
+  static final byte[] xattrValue = {0x31, 0x32, 0x33};
   private final Path snapshottableDir
       = new Path("/" + getClass().getSimpleName());
 
@@ -67,7 +70,7 @@ public class TestOrderedSnapshotDeletion {
     }
   }
 
-  @Test (timeout=60000)
+  @Test(timeout = 60000)
   public void testConf() throws Exception {
     DistributedFileSystem hdfs = cluster.getFileSystem();
     hdfs.mkdirs(snapshottableDir);
@@ -85,21 +88,125 @@ public class TestOrderedSnapshotDeletion {
     hdfs.mkdirs(sub2);
     hdfs.createSnapshot(snapshottableDir, "s2");
 
-    assertDeletionDenied(snapshottableDir, "s1", hdfs);
-    assertDeletionDenied(snapshottableDir, "s2", hdfs);
+    assertXAttrSet("s1", hdfs, null);
+    assertXAttrSet("s2", hdfs, null);
     hdfs.deleteSnapshot(snapshottableDir, "s0");
-    assertDeletionDenied(snapshottableDir, "s2", hdfs);
+    assertXAttrSet("s2", hdfs, null);
     hdfs.deleteSnapshot(snapshottableDir, "s1");
     hdfs.deleteSnapshot(snapshottableDir, "s2");
   }
 
-  static void assertDeletionDenied(Path snapshottableDir, String snapshot,
-      DistributedFileSystem hdfs) throws IOException {
+  void assertXAttrSet(String snapshot,
+                      DistributedFileSystem hdfs, XAttr newXattr)
+      throws IOException {
+    hdfs.deleteSnapshot(snapshottableDir, snapshot);
+    // Check xAttr for parent directory
+    FSNamesystem namesystem = cluster.getNamesystem();
+    Path snapshotRoot = SnapshotTestHelper.getSnapshotRoot(snapshottableDir,
+        snapshot);
+    INode inode = namesystem.getFSDirectory().getINode(snapshotRoot.toString());
+    XAttrFeature f = inode.getXAttrFeature();
+    XAttr xAttr = f.getXAttr(HdfsServerConstants.SNAPSHOT_XATTR_NAME);
+    assertTrue("Snapshot xAttr should exist", xAttr != null);
+    assertTrue(xAttr.getName().equals(HdfsServerConstants.SNAPSHOT_XATTR_NAME.
+        replace("system.", "")));
+    assertTrue(xAttr.getNameSpace().equals(XAttr.NameSpace.SYSTEM));
+    assertNull(xAttr.getValue());
+
+    // Make sure it's not user-visible
+    if (cluster.getNameNode().getConf().getBoolean(DFSConfigKeys.
+            DFS_NAMENODE_XATTRS_ENABLED_KEY,
+        DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_DEFAULT)) {
+      Map<String, byte[]> xattrMap = hdfs.getXAttrs(snapshotRoot);
+      assertTrue(newXattr == null ? xattrMap.isEmpty() :
+          Arrays.equals(newXattr.getValue(), xattrMap.get(xattrName)));
+    }
+  }
+
+  @Test(timeout = 60000)
+  public void testSnapshotXattrPersistence() throws Exception {
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+
+    final Path sub0 = new Path(snapshottableDir, "sub0");
+    hdfs.mkdirs(sub0);
+    hdfs.createSnapshot(snapshottableDir, "s0");
+
+    final Path sub1 = new Path(snapshottableDir, "sub1");
+    hdfs.mkdirs(sub1);
+    hdfs.createSnapshot(snapshottableDir, "s1");
+    assertXAttrSet("s1", hdfs, null);
+    assertXAttrSet("s1", hdfs, null);
+    cluster.restartNameNodes();
+    assertXAttrSet("s1", hdfs, null);
+  }
+
+  @Test(timeout = 60000)
+  public void testSnapshotXattrWithSaveNameSpace() throws Exception {
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+
+    final Path sub0 = new Path(snapshottableDir, "sub0");
+    hdfs.mkdirs(sub0);
+    hdfs.createSnapshot(snapshottableDir, "s0");
+
+    final Path sub1 = new Path(snapshottableDir, "sub1");
+    hdfs.mkdirs(sub1);
+    hdfs.createSnapshot(snapshottableDir, "s1");
+    assertXAttrSet("s1", hdfs, null);
+    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    cluster.restartNameNodes();
+    assertXAttrSet("s1", hdfs, null);
+  }
+
+  @Test(timeout = 60000)
+  public void testSnapshotXattrWithDisablingXattr() throws Exception {
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+
+    final Path sub0 = new Path(snapshottableDir, "sub0");
+    hdfs.mkdirs(sub0);
+    hdfs.createSnapshot(snapshottableDir, "s0");
+
+    final Path sub1 = new Path(snapshottableDir, "sub1");
+    hdfs.mkdirs(sub1);
+    hdfs.createSnapshot(snapshottableDir, "s1");
+    assertXAttrSet("s1", hdfs, null);
+    cluster.getNameNode().getConf().setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, false);
+    cluster.restartNameNodes();
+    // ensure xAttr feature is disabled
     try {
-      hdfs.deleteSnapshot(snapshottableDir, snapshot);
-      Assert.fail("deleting " +snapshot + " should fail");
-    } catch (SnapshotException se) {
-      LOG.info("Good, it is expected to have " + se);
+      hdfs.getXAttrs(snapshottableDir);
+    } catch (Exception e) {
+      assertTrue(e.getMessage().contains("The XAttr operation has been " +
+          "rejected.  Support for XAttrs has been disabled by " +
+          "setting dfs.namenode.xattrs.enabled to false"));
     }
+    // try deleting snapshot and verify it still sets the snapshot XAttr
+    assertXAttrSet("s1", hdfs, null);
+  }
+
+  @Test(timeout = 60000)
+  public void testSnapshotXAttrWithPreExistingXattrs() throws Exception {
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+    hdfs.setXAttr(snapshottableDir, xattrName, xattrValue,
+        EnumSet.of(XAttrSetFlag.CREATE));
+    XAttr newXAttr = XAttrHelper.buildXAttr(xattrName, xattrValue);
+    final Path sub0 = new Path(snapshottableDir, "sub0");
+    hdfs.mkdirs(sub0);
+    hdfs.createSnapshot(snapshottableDir, "s0");
+
+    final Path sub1 = new Path(snapshottableDir, "sub1");
+    hdfs.mkdirs(sub1);
+    hdfs.createSnapshot(snapshottableDir, "s1");
+    assertXAttrSet("s1", hdfs, newXAttr);
   }
 }

