[05/50] [abbrv] hadoop git commit: HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. Surendra Singh Lilhore.

2018-07-19 Thread rakeshr
HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3385bb6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3385bb6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3385bb6

Branch: refs/heads/HDFS-10285
Commit: f3385bb643ca2559836f68e1afdb40457be18397
Parents: c03b9a9
Author: Rakesh Radhakrishnan 
Authored: Mon Jun 5 12:32:41 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 19 22:46:05 2018 +0530

--
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 15 ++---
 .../namenode/TestStoragePolicySatisfier.java| 32 
 2 files changed, 42 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3385bb6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
index 81d337f..bd4e5ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
@@ -51,7 +51,6 @@ final class FSDirSatisfyStoragePolicyOp {
 
 assert fsd.getFSNamesystem().hasWriteLock();
 FSPermissionChecker pc = fsd.getPermissionChecker();
-List xAttrs = Lists.newArrayListWithCapacity(1);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -62,8 +61,11 @@ final class FSDirSatisfyStoragePolicyOp {
 fsd.checkPathAccess(pc, iip, FsAction.WRITE);
   }
   XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-  xAttrs.add(satisfyXAttr);
-  fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+  if (satisfyXAttr != null) {
+List xAttrs = Lists.newArrayListWithCapacity(1);
+xAttrs.add(satisfyXAttr);
+fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+  }
 } finally {
   fsd.writeUnlock();
 }
@@ -79,16 +81,19 @@ final class FSDirSatisfyStoragePolicyOp {
 
 // TODO: think about optimization here, label the dir instead
 // of the sub-files of the dir.
-if (inode.isFile()) {
+if (inode.isFile() && inode.asFile().numBlocks() != 0) {
   candidateNodes.add(inode);
 } else if (inode.isDirectory()) {
   for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-if (node.isFile()) {
+if (node.isFile() && node.asFile().numBlocks() != 0) {
   candidateNodes.add(node);
 }
   }
 }
 
+if (candidateNodes.isEmpty()) {
+  return null;
+}
 // If node has satisfy xattr, then stop adding it
 // to satisfy movement queue.
 if (inodeHasSatisfyXAttr(candidateNodes)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3385bb6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index fa954b8..8e08a1e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -907,6 +907,38 @@ public class TestStoragePolicySatisfier {
 }
   }
 
+  /**
+   * Test SPS with empty file.
+   * 1. Create one empty file.
+   * 2. Call satisfyStoragePolicy for empty file.
+   * 3. SPS should skip this file and xattr should not be added for empty file.
+   */
+  @Test(timeout = 30)
+  public void testSPSWhenFileLengthIsZero() throws Exception {
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  Path filePath = new Path("/zeroSizeFile");
+  DFSTestUtil.createFile(fs, filePath, 0, (short) 1, 0);
+  FSEditLog editlog = cluster.getNameNode().getNamesystem().getEditLog();
+  long lastWrittenTxId = editlog.getLastWrittenTxId();
+  fs.satisfyStoragePolicy(filePath);

[05/50] [abbrv] hadoop git commit: HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. Surendra Singh Lilhore.

2018-07-16 Thread rakeshr
HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/499478c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/499478c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/499478c7

Branch: refs/heads/HDFS-10285
Commit: 499478c7a0250833ad7cf0fa9d660a6bfe430d66
Parents: 506c1bd
Author: Rakesh Radhakrishnan 
Authored: Mon Jun 5 12:32:41 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Sun Jul 15 20:18:22 2018 +0530

--
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 15 ++---
 .../namenode/TestStoragePolicySatisfier.java| 32 
 2 files changed, 42 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/499478c7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
index 81d337f..bd4e5ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
@@ -51,7 +51,6 @@ final class FSDirSatisfyStoragePolicyOp {
 
 assert fsd.getFSNamesystem().hasWriteLock();
 FSPermissionChecker pc = fsd.getPermissionChecker();
-List xAttrs = Lists.newArrayListWithCapacity(1);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -62,8 +61,11 @@ final class FSDirSatisfyStoragePolicyOp {
 fsd.checkPathAccess(pc, iip, FsAction.WRITE);
   }
   XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-  xAttrs.add(satisfyXAttr);
-  fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+  if (satisfyXAttr != null) {
+List xAttrs = Lists.newArrayListWithCapacity(1);
+xAttrs.add(satisfyXAttr);
+fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+  }
 } finally {
   fsd.writeUnlock();
 }
@@ -79,16 +81,19 @@ final class FSDirSatisfyStoragePolicyOp {
 
 // TODO: think about optimization here, label the dir instead
 // of the sub-files of the dir.
-if (inode.isFile()) {
+if (inode.isFile() && inode.asFile().numBlocks() != 0) {
   candidateNodes.add(inode);
 } else if (inode.isDirectory()) {
   for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-if (node.isFile()) {
+if (node.isFile() && node.asFile().numBlocks() != 0) {
   candidateNodes.add(node);
 }
   }
 }
 
+if (candidateNodes.isEmpty()) {
+  return null;
+}
 // If node has satisfy xattr, then stop adding it
 // to satisfy movement queue.
 if (inodeHasSatisfyXAttr(candidateNodes)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/499478c7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index fa954b8..8e08a1e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -907,6 +907,38 @@ public class TestStoragePolicySatisfier {
 }
   }
 
+  /**
+   * Test SPS with empty file.
+   * 1. Create one empty file.
+   * 2. Call satisfyStoragePolicy for empty file.
+   * 3. SPS should skip this file and xattr should not be added for empty file.
+   */
+  @Test(timeout = 30)
+  public void testSPSWhenFileLengthIsZero() throws Exception {
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  Path filePath = new Path("/zeroSizeFile");
+  DFSTestUtil.createFile(fs, filePath, 0, (short) 1, 0);
+  FSEditLog editlog = cluster.getNameNode().getNamesystem().getEditLog();
+  long lastWrittenTxId = editlog.getLastWrittenTxId();
+  fs.satisfyStoragePolicy(filePath);

[05/50] [abbrv] hadoop git commit: HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. Surendra Singh Lilhore.

2018-07-12 Thread rakeshr
HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab0f4123
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab0f4123
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab0f4123

Branch: refs/heads/HDFS-10285
Commit: ab0f412387937c9bd1468beed6a4f3c495457181
Parents: dacec33
Author: Rakesh Radhakrishnan 
Authored: Mon Jun 5 12:32:41 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:00:48 2018 +0530

--
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 15 ++---
 .../namenode/TestStoragePolicySatisfier.java| 32 
 2 files changed, 42 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab0f4123/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
index 81d337f..bd4e5ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
@@ -51,7 +51,6 @@ final class FSDirSatisfyStoragePolicyOp {
 
 assert fsd.getFSNamesystem().hasWriteLock();
 FSPermissionChecker pc = fsd.getPermissionChecker();
-List xAttrs = Lists.newArrayListWithCapacity(1);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -62,8 +61,11 @@ final class FSDirSatisfyStoragePolicyOp {
 fsd.checkPathAccess(pc, iip, FsAction.WRITE);
   }
   XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-  xAttrs.add(satisfyXAttr);
-  fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+  if (satisfyXAttr != null) {
+List xAttrs = Lists.newArrayListWithCapacity(1);
+xAttrs.add(satisfyXAttr);
+fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+  }
 } finally {
   fsd.writeUnlock();
 }
@@ -79,16 +81,19 @@ final class FSDirSatisfyStoragePolicyOp {
 
 // TODO: think about optimization here, label the dir instead
 // of the sub-files of the dir.
-if (inode.isFile()) {
+if (inode.isFile() && inode.asFile().numBlocks() != 0) {
   candidateNodes.add(inode);
 } else if (inode.isDirectory()) {
   for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-if (node.isFile()) {
+if (node.isFile() && node.asFile().numBlocks() != 0) {
   candidateNodes.add(node);
 }
   }
 }
 
+if (candidateNodes.isEmpty()) {
+  return null;
+}
 // If node has satisfy xattr, then stop adding it
 // to satisfy movement queue.
 if (inodeHasSatisfyXAttr(candidateNodes)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab0f4123/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index fa954b8..8e08a1e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -907,6 +907,38 @@ public class TestStoragePolicySatisfier {
 }
   }
 
+  /**
+   * Test SPS with empty file.
+   * 1. Create one empty file.
+   * 2. Call satisfyStoragePolicy for empty file.
+   * 3. SPS should skip this file and xattr should not be added for empty file.
+   */
+  @Test(timeout = 30)
+  public void testSPSWhenFileLengthIsZero() throws Exception {
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  Path filePath = new Path("/zeroSizeFile");
+  DFSTestUtil.createFile(fs, filePath, 0, (short) 1, 0);
+  FSEditLog editlog = cluster.getNameNode().getNamesystem().getEditLog();
+  long lastWrittenTxId = editlog.getLastWrittenTxId();
+  fs.satisfyStoragePolicy(filePath);