hadoop git commit: HDFS-13076: [SPS]: Resolve conflicts after rebasing HDFS-10285 branch to trunk. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 9e2cb0cef -> d35255d43


HDFS-13076: [SPS]: Resolve conflicts after rebasing HDFS-10285 branch to trunk. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d35255d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d35255d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d35255d4

Branch: refs/heads/HDFS-10285
Commit: d35255d43b327498962cb20ac4068873bffd863b
Parents: 9e2cb0c
Author: Rakesh Radhakrishnan 
Authored: Thu Jul 5 10:10:13 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 10:10:13 2018 +0530

--
 .../hdfs/server/federation/router/RouterNamenodeProtocol.java | 6 ++
 .../hadoop/hdfs/server/federation/router/RouterRpcServer.java | 7 +++
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java  | 2 +-
 .../server/namenode/sps/IntraSPSNameNodeFileIdCollector.java  | 4 ++--
 4 files changed, 16 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d35255d4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
index 0433650..edfb391 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
@@ -184,4 +184,10 @@ public class RouterNamenodeProtocol implements NamenodeProtocol {
     rpcServer.checkOperation(OperationCategory.READ, false);
     return false;
   }
+
+  @Override
+  public Long getNextSPSPath() throws IOException {
+    // not supported
+    return null;
+  }
 }

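Both router stubs in this commit follow the same RBF convention: an operation that only makes sense inside a single namespace checks the operation category and then returns null instead of throwing. A self-contained sketch of how a caller can live with that contract (the interface and class here are illustrative, not the actual RBF sources):

import java.io.IOException;

/** Sketch only: a poller that tolerates endpoints which never supply SPS paths. */
class SpsPathPoller {
  interface SpsPathSource {
    // Mirrors NamenodeProtocol#getNextSPSPath: null means "nothing to hand out".
    Long getNextSPSPath() throws IOException;
  }

  /** Polls once; returns true only if a path id was actually handed out. */
  static boolean pollOnce(SpsPathSource source) throws IOException {
    Long pathId = source.getNextSPSPath();
    if (pathId == null) {
      return false; // router-style endpoints always land here
    }
    System.out.println("satisfying inode id " + pathId);
    return true;
  }

  public static void main(String[] args) throws IOException {
    // A router-like stub that never supplies paths, as in the hunk above.
    System.out.println(pollOnce(() -> null)); // prints false
  }
}
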
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d35255d4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index d99d0e5..f8905fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2532,4 +2532,11 @@ public class RouterRpcServer extends AbstractService
     checkOperation(OperationCategory.READ, false);
     return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
   }
+
+  @Override
+  public Long getNextSPSPath() throws IOException {
+    checkOperation(OperationCategory.READ, false);
+    // not supported
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d35255d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index cb0de67..94ada2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -5070,7 +5070,7 @@ public class BlockManager implements BlockStatsMXBean {
         DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY,
         DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT);
     String modeVal = spsMode;
-    if (org.apache.commons.lang.StringUtils.isBlank(modeVal)) {
+    if (org.apache.commons.lang3.StringUtils.isBlank(modeVal)) {
       modeVal = conf.get(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
           DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT);
     }

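The change above is only the package move from the retired commons-lang to commons-lang3; isBlank keeps its semantics (true for null, empty, and whitespace-only strings). A self-contained sketch of the blank-check-then-fallback pattern the surrounding code implements, with plain strings standing in for the Configuration lookup:

import org.apache.commons.lang3.StringUtils;

class ModeResolver {
  /** Returns the explicit mode, else the configured value, else the default. */
  static String resolveMode(String requested, String configured, String dflt) {
    if (!StringUtils.isBlank(requested)) {
      return requested;
    }
    // Mirrors conf.get(key, default): the configured value wins when present.
    return configured != null ? configured : dflt;
  }

  public static void main(String[] args) {
    System.out.println(resolveMode("  ", null, "NONE")); // prints NONE
  }
}
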
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d35255d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeFileIdCollector.java
--
diff --git 

[07/50] [abbrv] hadoop git commit: HDFS-11726. [SPS]: StoragePolicySatisfier should not select same storage type as source and destination in same datanode. Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-11726. [SPS]: StoragePolicySatisfier should not select same storage type 
as source and destination in same datanode. Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/015174a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/015174a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/015174a5

Branch: refs/heads/HDFS-10285
Commit: 015174a510789a0f4a4b66ccbfa3bfe3382bd99b
Parents: 35a6fde
Author: Rakesh Radhakrishnan 
Authored: Fri Jun 9 14:03:13 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:35:13 2018 +0530

--
 .../server/namenode/StoragePolicySatisfier.java | 23 ++
 .../namenode/TestStoragePolicySatisfier.java| 44 
 2 files changed, 58 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/015174a5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 9e2a4a0..1b2afa3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -501,15 +501,20 @@ public class StoragePolicySatisfier implements Runnable {
     // avoid choosing a target which already has this block.
     for (int i = 0; i < sourceWithStorageList.size(); i++) {
       StorageTypeNodePair existingTypeNodePair = sourceWithStorageList.get(i);
-      StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(blockInfo,
-          existingTypeNodePair.dn, expected);
-      if (chosenTarget != null) {
-        sourceNodes.add(existingTypeNodePair.dn);
-        sourceStorageTypes.add(existingTypeNodePair.storageType);
-        targetNodes.add(chosenTarget.dn);
-        targetStorageTypes.add(chosenTarget.storageType);
-        expected.remove(chosenTarget.storageType);
-        // TODO: We can increment scheduled block count for this node?
+
+      // Check whether the block replica is already placed in the expected
+      // storage type in this source datanode.
+      if (!expected.contains(existingTypeNodePair.storageType)) {
+        StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(
+            blockInfo, existingTypeNodePair.dn, expected);
+        if (chosenTarget != null) {
+          sourceNodes.add(existingTypeNodePair.dn);
+          sourceStorageTypes.add(existingTypeNodePair.storageType);
+          targetNodes.add(chosenTarget.dn);
+          targetStorageTypes.add(chosenTarget.storageType);
+          expected.remove(chosenTarget.storageType);
+          // TODO: We can increment scheduled block count for this node?
+        }
       }
       // To avoid choosing this excludeNodes as targets later
       excludeNodes.add(existingTypeNodePair.dn);

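The added guard is the entire fix: a datanode whose replica already sits on one of the expected storage types is skipped as a source, so a move can never pick the same storage type for source and destination on one node. A self-contained sketch of that rule with simplified stand-in types (not the HDFS classes):

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

class SameNodeSkipRule {
  enum StorageType { DISK, SSD, ARCHIVE }

  static final class Replica {
    final String datanode;
    final StorageType type;
    Replica(String datanode, StorageType type) {
      this.datanode = datanode;
      this.type = type;
    }
  }

  /** Only replicas not already on an expected type become move sources. */
  static List<String> pickSources(List<Replica> replicas,
      Set<StorageType> expected) {
    List<String> sources = new ArrayList<>();
    for (Replica r : replicas) {
      if (expected.contains(r.type)) {
        continue; // already satisfies the policy on this node: skip
      }
      sources.add(r.datanode);
    }
    return sources;
  }

  public static void main(String[] args) {
    List<Replica> replicas = List.of(
        new Replica("dn1", StorageType.ARCHIVE),
        new Replica("dn2", StorageType.DISK));
    // Policy expects ARCHIVE, so only dn2 is printed as a move source.
    System.out.println(pickSources(replicas, Set.of(StorageType.ARCHIVE)));
  }
}
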
http://git-wip-us.apache.org/repos/asf/hadoop/blob/015174a5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 8e08a1e..f1a4169 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -764,6 +764,50 @@ public class TestStoragePolicySatisfier {
   }
 
   /**
+   * If replica with expected storage type already exist in source DN then that
+   * DN should be skipped.
+   */
+  @Test(timeout = 300000)
+  public void testSPSWhenReplicaWithExpectedStorageAlreadyAvailableInSource()
+      throws Exception {
+    StorageType[][] diskTypes = new StorageType[][] {
+        {StorageType.DISK, StorageType.ARCHIVE},
+        {StorageType.DISK, StorageType.ARCHIVE},
+        {StorageType.DISK, StorageType.ARCHIVE}};
+
+    try {
+      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
+          storagesPerDatanode, capacity);
+      dfs = hdfsCluster.getFileSystem();
+      // 1. Write two replica on disk
+      DFSTestUtil.createFile(dfs, new Path(file), DEFAULT_BLOCK_SIZE,
+          (short) 2, 

[10/50] [abbrv] hadoop git commit: HDFS-11965: [SPS]: Should give chance to satisfy the low redundant blocks before removing the xattr. Contributed by Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-11965: [SPS]: Should give chance to satisfy the low redundant blocks 
before removing the xattr. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16b58c82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16b58c82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16b58c82

Branch: refs/heads/HDFS-10285
Commit: 16b58c82b5360cc341950dac5fdf5c18dde1006a
Parents: 0542c87
Author: Uma Maheswara Rao G 
Authored: Mon Jul 10 18:00:58 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:35:29 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  15 +++
 .../server/namenode/StoragePolicySatisfier.java |  20 +++-
 .../namenode/TestStoragePolicySatisfier.java| 102 ++-
 ...stStoragePolicySatisfierWithStripedFile.java |  90 
 4 files changed, 224 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16b58c82/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1319a2c..ddf3f6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4324,6 +4324,21 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
+   * Check file has low redundancy blocks.
+   */
+  public boolean hasLowRedundancyBlocks(BlockCollection bc) {
+    boolean result = false;
+    for (BlockInfo block : bc.getBlocks()) {
+      short expected = getExpectedRedundancyNum(block);
+      final NumberReplicas n = countNodes(block);
+      if (expected > n.liveReplicas()) {
+        result = true;
+      }
+    }
+    return result;
+  }
+
+  /**
    * Check sufficient redundancy of the blocks in the collection. If any block
    * is needed reconstruction, insert it into the reconstruction queue.
    * Otherwise, if the block is more than the expected replication factor,

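hasLowRedundancyBlocks() above is a straight scan: the file counts as low redundant as soon as any block has fewer live replicas than its expected redundancy. The same check in a self-contained form, with plain arrays standing in for BlockCollection and NumberReplicas:

class RedundancyCheck {
  /** True if any block has fewer live replicas than expected. */
  static boolean hasLowRedundancyBlocks(int[] expected, int[] live) {
    for (int i = 0; i < expected.length; i++) {
      if (expected[i] > live[i]) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    // Second block expects 3 replicas but only 2 are live -> low redundancy.
    System.out.println(hasLowRedundancyBlocks(new int[] {3, 3}, new int[] {3, 2}));
  }
}
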
http://git-wip-us.apache.org/repos/asf/hadoop/blob/16b58c82/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 1b2afa3..97cbf1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -99,7 +99,10 @@ public class StoragePolicySatisfier implements Runnable {
     // Represents that, the analysis skipped due to some conditions.
     // Example conditions are if no blocks really exists in block collection or
     // if analysis is not required on ec files with unsuitable storage policies
-    BLOCKS_TARGET_PAIRING_SKIPPED;
+    BLOCKS_TARGET_PAIRING_SKIPPED,
+    // Represents that, All the reported blocks are satisfied the policy but
+    // some of the blocks are low redundant.
+    FEW_LOW_REDUNDANCY_BLOCKS
   }
 
   public StoragePolicySatisfier(final Namesystem namesystem,
@@ -247,6 +250,14 @@ public class StoragePolicySatisfier implements Runnable {
       case FEW_BLOCKS_TARGETS_PAIRED:
         this.storageMovementsMonitor.add(blockCollectionID, false);
         break;
+      case FEW_LOW_REDUNDANCY_BLOCKS:
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Adding trackID " + blockCollectionID
+              + " back to retry queue as some of the blocks"
+              + " are low redundant.");
+        }
+        this.storageMovementNeeded.add(blockCollectionID);
+        break;
       // Just clean Xattrs
       case BLOCKS_TARGET_PAIRING_SKIPPED:
       case BLOCKS_ALREADY_SATISFIED:
@@ -347,11 +358,16 @@ public class StoragePolicySatisfier implements Runnable {
         boolean computeStatus = computeBlockMovingInfos(blockMovingInfos,
             blockInfo, expectedStorageTypes, existing, storages);
         if (computeStatus
-            && status != 

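The new FEW_LOW_REDUNDANCY_BLOCKS branch keeps the file's xattr and puts the trackID back on the pending queue, so replication gets a chance to repair the file before the satisfier gives up. A minimal, self-contained sketch of that control flow (names are illustrative):

import java.util.ArrayDeque;
import java.util.Queue;

class RetryQueueSketch {
  enum Status {
    FEW_BLOCKS_TARGETS_PAIRED, FEW_LOW_REDUNDANCY_BLOCKS, BLOCKS_ALREADY_SATISFIED
  }

  private final Queue<Long> storageMovementNeeded = new ArrayDeque<>();

  void handle(long trackId, Status status) {
    switch (status) {
    case FEW_LOW_REDUNDANCY_BLOCKS:
      // Low-redundancy file: let replication repair it first, retry later.
      storageMovementNeeded.add(trackId);
      break;
    case BLOCKS_ALREADY_SATISFIED:
      // Terminal: the satisfy xattr can be cleaned up here.
      break;
    default:
      // FEW_BLOCKS_TARGETS_PAIRED: hand off to the attempted-items monitor.
      break;
    }
  }

  public static void main(String[] args) {
    RetryQueueSketch sketch = new RetryQueueSketch();
    sketch.handle(42L, Status.FEW_LOW_REDUNDANCY_BLOCKS);
    System.out.println(sketch.storageMovementNeeded); // prints [42]
  }
}
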
[42/50] [abbrv] hadoop git commit: HDFS-13097: [SPS]: Fix the branch review comments(Part1). Contributed by Surendra Singh.

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f26fca8b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index 85a101f..47ea39f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -252,8 +252,8 @@ public class TestNameNodeReconfigure {
 // Since DFS_STORAGE_POLICY_ENABLED_KEY is disabled, SPS can't be enabled.
 assertEquals("SPS shouldn't start as "
 + DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY + " is disabled", false,
-nameNode.getNamesystem().getBlockManager()
-.isStoragePolicySatisfierRunning());
+nameNode.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
 StoragePolicySatisfierMode.INTERNAL, false);
 
@@ -280,8 +280,8 @@ public class TestNameNodeReconfigure {
   fail("ReconfigurationException expected");
 } catch (ReconfigurationException e) {
   GenericTestUtils.assertExceptionContains(
-  "For enabling or disabling storage policy satisfier, we must "
-  + "pass either none/internal/external string value only",
+  "For enabling or disabling storage policy satisfier, must "
+  + "pass either internal/external/none string value only",
   e.getCause());
 }
 
@@ -301,8 +301,8 @@ public class TestNameNodeReconfigure {
 nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
 StoragePolicySatisfierMode.EXTERNAL.toString());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-false, nameNode.getNamesystem().getBlockManager()
-.isStoragePolicySatisfierRunning());
+false, nameNode.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
 StoragePolicySatisfierMode.EXTERNAL.toString(),
 nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -342,8 +342,8 @@ public class TestNameNodeReconfigure {
 nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
 StoragePolicySatisfierMode.INTERNAL.toString());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-true, nameNode.getNamesystem().getBlockManager()
-.isStoragePolicySatisfierRunning());
+true, nameNode.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
 StoragePolicySatisfierMode.INTERNAL.toString(),
 nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -353,7 +353,8 @@ public class TestNameNodeReconfigure {
   void verifySPSEnabled(final NameNode nameNode, String property,
   StoragePolicySatisfierMode expected, boolean isSatisfierRunning) {
 assertEquals(property + " has wrong value", isSatisfierRunning, nameNode
-.getNamesystem().getBlockManager().isStoragePolicySatisfierRunning());
+.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 String actual = nameNode.getConf().get(property,
 DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT);
 assertEquals(property + " has wrong value", expected,

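All of the assertions above go through NameNode#reconfigureProperty, which accepts only internal, external, or none for this key and otherwise throws the ReconfigurationException the test catches. A compact sketch of that call against a live NameNode handle (a sketch, not a full test):

import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

class SpsModeToggle {
  /** Switches a live NameNode to the external satisfier without a restart. */
  static void setExternal(NameNode nameNode) throws ReconfigurationException {
    nameNode.reconfigureProperty(
        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
        StoragePolicySatisfierMode.EXTERNAL.toString());
  }
}
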
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f26fca8b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index b84214c..9f98777 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -389,7 +389,8 @@ public class TestPersistentStoragePolicySatisfier {
   fs.setStoragePolicy(testFile, ONE_SSD);
   fs.satisfyStoragePolicy(testFile);
 
-  cluster.getNamesystem().getBlockManager().disableSPS();
+  

[20/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9202cac4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index d3c5cb1..2f621e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -156,7 +156,7 @@ import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -1517,14 +1517,15 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   boolean requestFullBlockReportLease,
   @Nonnull SlowPeerReports slowPeers,
   @Nonnull SlowDiskReports slowDisks,
-  BlocksStorageMovementResult[] blkMovementStatus) throws IOException {
+  BlocksStorageMoveAttemptFinished storageMovementFinishedBlks)
+  throws IOException {
 checkNNStartup();
 verifyRequest(nodeReg);
 return namesystem.handleHeartbeat(nodeReg, report,
 dnCacheCapacity, dnCacheUsed, xceiverCount, xmitsInProgress,
 failedVolumes, volumeFailureSummary, requestFullBlockReportLease,
 slowPeers, slowDisks,
-blkMovementStatus);
+storageMovementFinishedBlks);
   }
 
   @Override // DatanodeProtocol

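The signature change above replaces the per-trackID BlocksStorageMovementResult[] with a single BlocksStorageMoveAttemptFinished report: datanodes now just say which block moves finished, and the NameNode decides about retries itself, which fits this commit's goal of moving result tracking out of the co-ordinator datanode. A rough, illustrative stand-in for the shape of such a payload, not the actual protocol class:

/** Illustrative stand-in for the BlocksStorageMoveAttemptFinished payload. */
class MoveAttemptFinishedReport {
  // Ids of blocks whose move attempt completed since the last heartbeat.
  private final long[] finishedBlockIds;

  MoveAttemptFinishedReport(long[] finishedBlockIds) {
    this.finishedBlockIds = finishedBlockIds;
  }

  long[] getFinishedBlockIds() {
    // The NameNode matches these against its attempted-items list and
    // re-queues any trackID whose blocks did not all show up here.
    return finishedBlockIds;
  }
}
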
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9202cac4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index a4372d5..a28a806 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.util.Time.monotonicNow;
+
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -44,7 +46,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.util.Daemon;
@@ -82,25 +84,38 @@ public class StoragePolicySatisfier implements Runnable {
   /**
* Represents the collective analysis status for all blocks.
*/
-  private enum BlocksMovingAnalysisStatus {
-// Represents that, the analysis skipped due to some conditions. A such
-// condition is if block collection is in incomplete state.
-ANALYSIS_SKIPPED_FOR_RETRY,
-// Represents that, all block storage movement needed blocks found its
-// targets.
-ALL_BLOCKS_TARGETS_PAIRED,
-// Represents that, only fewer or none of the block storage movement needed
-// block found its eligible targets.
-FEW_BLOCKS_TARGETS_PAIRED,
-// Represents that, none of the blocks found for block storage movements.
-BLOCKS_ALREADY_SATISFIED,
-// Represents that, the analysis skipped due to some conditions.
-// Example conditions are if no blocks really exists in block collection or
-// if analysis is not required on ec files with unsuitable storage policies
-BLOCKS_TARGET_PAIRING_SKIPPED,
-// Represents that, All the reported blocks are satisfied the policy but
-// some of the blocks are low redundant.
-FEW_LOW_REDUNDANCY_BLOCKS
+  private 

[34/50] [abbrv] hadoop git commit: HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for external/internal implementations. Contributed by Uma Maheswara Rao G

2018-07-04 Thread rakeshr
HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for 
external/internal implementations. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2412c34e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2412c34e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2412c34e

Branch: refs/heads/HDFS-10285
Commit: 2412c34e5eabeeea0975ddd817569f3653495714
Parents: 8f4c8c1
Author: Rakesh Radhakrishnan 
Authored: Fri Jan 19 08:51:49 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:46:54 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  61 +-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  16 +-
 .../hdfs/server/namenode/FSDirectory.java   |   6 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  10 +-
 .../namenode/sps/BlockMoveTaskHandler.java  |  44 
 .../namenode/sps/BlockMovementListener.java |  40 
 .../sps/BlockStorageMovementAttemptedItems.java |  28 +--
 .../sps/BlockStorageMovementNeeded.java | 207 ---
 .../hdfs/server/namenode/sps/Context.java   |  43 ++--
 .../server/namenode/sps/FileIdCollector.java|  43 
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |  62 ++
 .../namenode/sps/IntraSPSNameNodeContext.java   |  62 ++
 .../sps/IntraSPSNameNodeFileIdCollector.java| 178 
 .../hdfs/server/namenode/sps/ItemInfo.java  |  81 
 .../hdfs/server/namenode/sps/SPSPathIds.java|  63 ++
 .../hdfs/server/namenode/sps/SPSService.java| 107 ++
 .../namenode/sps/StoragePolicySatisfier.java| 175 +++-
 .../TestBlockStorageMovementAttemptedItems.java |  19 +-
 .../sps/TestStoragePolicySatisfier.java | 111 ++
 ...stStoragePolicySatisfierWithStripedFile.java |  19 +-
 20 files changed, 938 insertions(+), 437 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2412c34e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index c2d5162..63117ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -93,8 +93,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.namenode.sps.Context;
-import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
+import org.apache.hadoop.hdfs.server.namenode.sps.SPSPathIds;
+import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -434,7 +434,8 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
-  private Context spsctxt = null;
+  private final SPSPathIds spsPaths;
+
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -481,8 +482,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-spsctxt = new IntraSPSNameNodeContext(namesystem, this, conf);
-sps = new StoragePolicySatisfier(spsctxt);
+sps = new StoragePolicySatisfier(conf);
+spsPaths = new SPSPathIds();
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5033,8 +5034,7 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-// TODO: FSDirectory will get removed via HDFS-12911 modularization work
-sps.start(false, namesystem.getFSDirectory());
+sps.start(false);
   }
 
   /**
@@ -5070,8 +5070,7 @@ public class BlockManager implements BlockStatsMXBean {
   

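After this refactor the BlockManager no longer builds an IntraSPSNameNodeContext at construction time; it keeps a queue of pending path ids (SPSPathIds) and hands the satisfier a plain Configuration. A minimal sketch of what such a path-id queue provides (illustrative, not the actual SPSPathIds class):

import java.util.LinkedList;
import java.util.Queue;

class SpsPathIdsSketch {
  // FIFO of inode ids awaiting satisfier attention.
  private final Queue<Long> pathIds = new LinkedList<>();

  synchronized void add(long pathId) {
    pathIds.add(pathId);
  }

  synchronized Long pollNext() {
    return pathIds.poll(); // null when nothing is queued
  }

  public static void main(String[] args) {
    SpsPathIdsSketch q = new SpsPathIdsSketch();
    q.add(1001L);
    System.out.println(q.pollNext()); // 1001
    System.out.println(q.pollNext()); // null when drained
  }
}
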
[01/50] [abbrv] hadoop git commit: HDFS-11338: [SPS]: Fix timeout issue in unit tests caused by longer NN down time. Contributed by Wei Zhou and Rakesh R [Forced Update!]

2018-07-04 Thread rakeshr
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 aa59f4231 -> 9e2cb0cef (forced update)


HDFS-11338: [SPS]: Fix timeout issue in unit tests caused by longer NN down 
time. Contributed by Wei Zhou and Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85fc7135
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85fc7135
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85fc7135

Branch: refs/heads/HDFS-10285
Commit: 85fc713547f5e3f3e9ba4f8f51f73357873dbe4c
Parents: 104a432
Author: Uma Maheswara Rao G 
Authored: Tue Apr 11 14:25:01 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:34:41 2018 +0530

--
 .../server/blockmanagement/BlockManager.java| 13 +--
 .../BlockStorageMovementAttemptedItems.java | 25 +
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../server/namenode/StoragePolicySatisfier.java | 38 ++--
 .../TestBlockStorageMovementAttemptedItems.java |  3 +-
 5 files changed, 60 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85fc7135/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 40e9617..1319a2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -726,7 +726,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   public void close() {
 if (sps != null) {
-  sps.stop(false);
+  sps.deactivate(false);
 }
 bmSafeMode.close();
 try {
@@ -741,6 +741,7 @@ public class BlockManager implements BlockStatsMXBean {
 datanodeManager.close();
 pendingReconstruction.stop();
 blocksMap.close();
+stopSPSGracefully();
   }
 
   /** @return the datanodeManager */
@@ -5059,10 +5060,18 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already stopped.");
   return;
 }
-sps.stop(true);
+sps.deactivate(true);
   }
 
   /**
+   * Timed wait to stop storage policy satisfier daemon threads.
+   */
+  public void stopSPSGracefully() {
+if (sps != null) {
+  sps.stopGracefully();
+}
+  }
+  /**
* @return True if storage policy satisfier running.
*/
   public boolean isStoragePolicySatisfierRunning() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85fc7135/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index f15db73..26b98d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -130,21 +130,34 @@ public class BlockStorageMovementAttemptedItems {
   }
 
   /**
-   * Stops the monitor thread.
+   * Sets running flag to false. Also, this will interrupt monitor thread and
+   * clear all the queued up tasks.
*/
-  public synchronized void stop() {
+  public synchronized void deactivate() {
 monitorRunning = false;
 if (timerThread != null) {
   timerThread.interrupt();
-  try {
-timerThread.join(3000);
-  } catch (InterruptedException ie) {
-  }
 }
 this.clearQueues();
   }
 
   /**
+   * Timed wait to stop monitor thread.
+   */
+  synchronized void stopGracefully() {
+if (timerThread == null) {
+  return;
+}
+if (monitorRunning) {
+  deactivate();
+}
+try {
+  timerThread.join(3000);
+} catch (InterruptedException ie) {
+}
+  }
+
+  /**
* This class contains information of an attempted trackID. Information such
* as, (a)last attempted time stamp, (b)whether all the blocks in the trackID
* were attempted and blocks movement has been scheduled to satisfy storage

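The deactivate()/stopGracefully() split above is a two-phase shutdown: first flip the running flag and interrupt so the monitor wakes up, then, only when a full stop is wanted, wait a bounded time for the thread to die. The same pattern in a self-contained sketch:

class GracefulStopSketch {
  private volatile boolean running = true;
  private Thread worker;

  void start() {
    worker = new Thread(() -> {
      while (running) {
        try {
          Thread.sleep(1000); // stand-in for the monitor's periodic work
        } catch (InterruptedException ie) {
          // fall through and re-check the running flag
        }
      }
    });
    worker.start();
  }

  synchronized void deactivate() {
    running = false;
    if (worker != null) {
      worker.interrupt(); // wake the thread so it sees running == false
    }
  }

  synchronized void stopGracefully() {
    if (worker == null) {
      return;
    }
    if (running) {
      deactivate();
    }
    try {
      worker.join(3000); // bounded wait, mirroring timerThread.join(3000)
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    }
  }
}
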

[28/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
deleted file mode 100644
index 6991ad2..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ /dev/null
@@ -1,580 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Supplier;
-
-/**
- * Tests that StoragePolicySatisfier daemon is able to check the striped blocks
- * to be moved and finding its expected target locations in order to satisfy the
- * storage policy.
- */
-public class TestStoragePolicySatisfierWithStripedFile {
-
-  private static final Logger LOG = LoggerFactory
-  .getLogger(TestStoragePolicySatisfierWithStripedFile.class);
-
-  private final int stripesPerBlock = 2;
-
-  private ErasureCodingPolicy ecPolicy;
-  private int dataBlocks;
-  private int parityBlocks;
-  private int cellSize;
-  private int defaultStripeBlockSize;
-
-  private ErasureCodingPolicy getEcPolicy() {
-return StripedFileTestUtil.getDefaultECPolicy();
-  }
-
-  /**
-   * Initialize erasure coding policy.
-   */
-  @Before
-  public void init(){
-ecPolicy = getEcPolicy();
-dataBlocks = ecPolicy.getNumDataUnits();
-parityBlocks = ecPolicy.getNumParityUnits();
-cellSize = ecPolicy.getCellSize();
-defaultStripeBlockSize = cellSize * stripesPerBlock;
-  }
-
-  /**
-   * Tests to verify that all the striped blocks(data + parity blocks) are
-   * moving to satisfy the storage policy.
-   */
-  @Test(timeout = 300000)
-  public void testMoverWithFullStripe() throws Exception {
-// start 10 datanodes
-int numOfDatanodes = 10;
-int storagesPerDatanode = 2;
-long capacity = 20 * defaultStripeBlockSize;
-long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
-for (int i = 0; i < numOfDatanodes; i++) {
-  for (int j = 0; j < storagesPerDatanode; j++) {
-capacities[i][j] = capacity;
-  }
-}
-
-final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
-initConfWithStripe(conf, defaultStripeBlockSize);
-final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-.numDataNodes(numOfDatanodes)
-.storagesPerDatanode(storagesPerDatanode)
-   

[17/50] [abbrv] hadoop git commit: HDFS-12225: [SPS]: Optimize extended attributes for tracking SPS movements. Contributed by Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-12225: [SPS]: Optimize extended attributes for tracking SPS movements. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcc32361
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcc32361
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcc32361

Branch: refs/heads/HDFS-10285
Commit: bcc3236132b2f678fd1b08e445f3aec73ce472cb
Parents: a59eb62
Author: Uma Maheswara Rao G 
Authored: Wed Aug 23 15:37:03 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:36:07 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  21 +-
 .../server/blockmanagement/DatanodeManager.java |  14 +-
 .../hdfs/server/datanode/BPOfferService.java|   1 +
 .../BlockStorageMovementAttemptedItems.java |  95 +---
 .../namenode/BlockStorageMovementNeeded.java| 233 ++-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  91 +++-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  11 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../server/namenode/StoragePolicySatisfier.java | 108 ++---
 .../TestStoragePolicySatisfyWorker.java |   5 +-
 .../TestBlockStorageMovementAttemptedItems.java |  34 +--
 .../TestPersistentStoragePolicySatisfier.java   | 104 +
 .../namenode/TestStoragePolicySatisfier.java| 127 +-
 14 files changed, 589 insertions(+), 259 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc32361/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a442a92..0ee558a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,7 +89,6 @@ import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementNeeded;
 import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@@ -431,9 +430,6 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
-  private final BlockStorageMovementNeeded storageMovementNeeded =
-  new BlockStorageMovementNeeded();
-
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -480,8 +476,7 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this,
-conf);
+sps = new StoragePolicySatisfier(namesystem, this, conf);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5009,20 +5004,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Set file block collection for which storage movement needed for its blocks.
-   *
-   * @param id
-   *  - file block collection id.
-   */
-  public void satisfyStoragePolicy(long id) {
-storageMovementNeeded.add(id);
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Added block collection id {} to block "
-  + "storageMovementNeeded queue", id);
-}
-  }
-
-  /**
* Gets the storage policy satisfier instance.
*
* @return sps

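For context, the extended attribute this commit reworks is stamped when a client requests satisfaction. A small sketch of that client-side trigger, matching the calls the branch's tests use (the COLD policy name is simply one those tests pick):

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class SatisfyTrigger {
  /** Sets a policy and asks the NameNode to satisfy it asynchronously. */
  static void requestSatisfy(DistributedFileSystem dfs, String file)
      throws IOException {
    dfs.setStoragePolicy(new Path(file), "COLD");
    // Stamps the satisfy xattr; the SPS later picks the inode off its queue.
    dfs.satisfyStoragePolicy(new Path(file));
  }
}
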
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc32361/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java 

[38/50] [abbrv] hadoop git commit: HDFS-13057: [SPS]: Revisit configurations to make SPS service modes internal/external/none. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-13057: [SPS]: Revisit configurations to make SPS service modes 
internal/external/none. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96fb1858
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96fb1858
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96fb1858

Branch: refs/heads/HDFS-10285
Commit: 96fb185805d2d37bc977983adc909edca18ef573
Parents: 49ee352
Author: Uma Maheswara Rao G 
Authored: Fri Jan 26 08:57:29 2018 -0800
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:50:09 2018 +0530

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  39 
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +-
 .../server/blockmanagement/BlockManager.java| 105 +++---
 .../hdfs/server/namenode/FSNamesystem.java  |   6 +-
 .../hdfs/server/namenode/FSTreeTraverser.java   |   2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  34 ++--
 .../sps/BlockStorageMovementNeeded.java |   2 +-
 .../namenode/sps/IntraSPSNameNodeContext.java   |   3 +
 .../hdfs/server/namenode/sps/SPSService.java|   4 +-
 .../namenode/sps/StoragePolicySatisfier.java|  17 +-
 .../server/sps/ExternalSPSFileIDCollector.java  |  32 ++-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  16 +-
 .../src/main/resources/hdfs-default.xml |  11 +-
 .../src/site/markdown/ArchivalStorage.md|  17 +-
 .../TestStoragePolicySatisfyWorker.java |   5 +-
 .../hadoop/hdfs/server/mover/TestMover.java |  45 +++--
 .../hdfs/server/mover/TestStorageMover.java |   4 +-
 .../namenode/TestNameNodeReconfigure.java   | 105 +-
 .../TestPersistentStoragePolicySatisfier.java   |   9 +-
 .../TestStoragePolicySatisfierWithHA.java   |  12 +-
 .../sps/TestStoragePolicySatisfier.java | 202 +++
 ...stStoragePolicySatisfierWithStripedFile.java |  17 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 112 +++---
 .../hdfs/tools/TestStoragePolicyCommands.java   |   5 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |  14 +-
 25 files changed, 500 insertions(+), 327 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fb1858/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index aabcdd9..ab48dcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -129,6 +129,45 @@ public final class HdfsConstants {
   }
 
   /**
+   * Storage policy satisfier service modes.
+   */
+  public enum StoragePolicySatisfierMode {
+
+/**
+ * This mode represents that SPS service is running inside Namenode and can
+ * accept any SPS call request.
+ */
+INTERNAL,
+
+/**
+ * This mode represents that SPS service is running outside Namenode as an
+ * external service and can accept any SPS call request.
+ */
+EXTERNAL,
+
+/**
+ * This mode represents that SPS service is disabled and cannot accept any
+ * SPS call request.
+ */
+NONE;
+
+    private static final Map<String, StoragePolicySatisfierMode> MAP =
+        new HashMap<>();
+
+static {
+  for (StoragePolicySatisfierMode a : values()) {
+MAP.put(a.name(), a);
+  }
+}
+
+/** Convert the given String to a StoragePolicySatisfierMode. */
+public static StoragePolicySatisfierMode fromString(String s) {
+  return MAP.get(StringUtils.toUpperCase(s));
+}
+  }
+
+
+  /**
* Storage policy satisfy path status.
*/
   public enum StoragePolicySatisfyPathStatus {

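A note on the enum above: MAP is keyed by name() and fromString() upper-cases its argument, so parsing is case-insensitive, and an unknown value yields null rather than an exception. A quick demonstration:

import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;

class ModeParse {
  public static void main(String[] args) {
    System.out.println(StoragePolicySatisfierMode.fromString("external")); // EXTERNAL
    System.out.println(StoragePolicySatisfierMode.fromString("bogus"));    // null
  }
}
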
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fb1858/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index fc95d8c..0d93ff3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import 

[27/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
new file mode 100644
index 0000000..8dc52dc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -0,0 +1,1779 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationException;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Tests that StoragePolicySatisfier daemon is able to check the blocks to be
+ * moved and finding its suggested target locations to move.
+ */
+public class TestStoragePolicySatisfier {
+
+  {
+GenericTestUtils.setLogLevel(
+getLogger(FSTreeTraverser.class), Level.DEBUG);
+  }
+
+  private static final String ONE_SSD = "ONE_SSD";
+  private static final String COLD = "COLD";
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestStoragePolicySatisfier.class);
+  private final Configuration config = new HdfsConfiguration();
+  private StorageType[][] allDiskTypes =
+ 

[08/50] [abbrv] hadoop git commit: HDFS-11966. [SPS] Correct the log in BlockStorageMovementAttemptedItems#blockStorageMovementResultCheck. Contributed by Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-11966. [SPS] Correct the log in 
BlockStorageMovementAttemptedItems#blockStorageMovementResultCheck. Contributed 
by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1d8dcce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1d8dcce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1d8dcce

Branch: refs/heads/HDFS-10285
Commit: b1d8dcce5e8100a0e30b3aa8aa758d1316feca25
Parents: 015174a
Author: Rakesh Radhakrishnan 
Authored: Sun Jun 18 11:00:28 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:35:19 2018 +0530

--
 .../BlockStorageMovementAttemptedItems.java | 39 ++--
 1 file changed, 20 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1d8dcce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index bf7859c..6048986 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -296,19 +296,17 @@ public class BlockStorageMovementAttemptedItems {
 .next();
 synchronized (storageMovementAttemptedItems) {
   Status status = storageMovementAttemptedResult.getStatus();
+  long trackId = storageMovementAttemptedResult.getTrackId();
   ItemInfo itemInfo;
   switch (status) {
   case FAILURE:
-blockStorageMovementNeeded
-.add(storageMovementAttemptedResult.getTrackId());
+blockStorageMovementNeeded.add(trackId);
 LOG.warn("Blocks storage movement results for the tracking id: {}"
 + " is reported from co-ordinating datanode, but result"
-+ " status is FAILURE. So, added for retry",
-storageMovementAttemptedResult.getTrackId());
++ " status is FAILURE. So, added for retry", trackId);
 break;
   case SUCCESS:
-itemInfo = storageMovementAttemptedItems
-.get(storageMovementAttemptedResult.getTrackId());
+itemInfo = storageMovementAttemptedItems.get(trackId);
 
 // ItemInfo could be null. One case is, before the blocks movements
 // result arrives the attempted trackID became timed out and then
@@ -318,20 +316,23 @@ public class BlockStorageMovementAttemptedItems {
         // following condition. If all the block locations under the trackID
         // are attempted and failed to find matching target nodes to satisfy
         // storage policy in previous SPS iteration.
-        if (itemInfo != null
-            && !itemInfo.isAllBlockLocsAttemptedToSatisfy()) {
-          blockStorageMovementNeeded
-              .add(storageMovementAttemptedResult.getTrackId());
-          LOG.warn("Blocks storage movement is SUCCESS for the track id: {}"
-              + " reported from co-ordinating datanode. But adding trackID"
-              + " back to retry queue as some of the blocks couldn't find"
-              + " matching target nodes in previous SPS iteration.",
-              storageMovementAttemptedResult.getTrackId());
+        String msg = "Blocks storage movement is SUCCESS for the track id: "
+            + trackId + " reported from co-ordinating datanode.";
+        if (itemInfo != null) {
+          if (!itemInfo.isAllBlockLocsAttemptedToSatisfy()) {
+            blockStorageMovementNeeded.add(trackId);
+            LOG.warn("{} But adding trackID back to retry queue as some of"
+                + " the blocks couldn't find matching target nodes in"
+                + " previous SPS iteration.", msg);
+          } else {
+            LOG.info(msg);
+            // Remove xattr for the track id.
+            this.sps.postBlkStorageMovementCleanup(
+                storageMovementAttemptedResult.getTrackId());
+          }
         } else {
-          LOG.info("Blocks storage movement is SUCCESS for the track id: {}"
-              + " reported from co-ordinating datanode. But the trackID "
-              + "doesn't exists in storageMovementAttemptedItems list",
-  

[33/50] [abbrv] hadoop git commit: HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for external/internal implementations. Contributed by Uma Maheswara Rao G

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2412c34e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
index 2a7bde5..9354044 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -72,7 +72,6 @@ import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -147,12 +146,11 @@ public class TestStoragePolicySatisfier {
 startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
 storagesPerDatanode, capacity, hdfsCluster);
 
-dfs.satisfyStoragePolicy(new Path(file));
-
 hdfsCluster.triggerHeartbeats();
+dfs.satisfyStoragePolicy(new Path(file));
 // Wait till namenode notified about the block location details
-DFSTestUtil.waitExpectedStorageType(
-file, StorageType.ARCHIVE, 3, 3, dfs);
+DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 3, 35000,
+dfs);
   }
 
   @Test(timeout = 30)
@@ -1284,6 +1282,7 @@ public class TestStoragePolicySatisfier {
 {StorageType.ARCHIVE, StorageType.SSD},
 {StorageType.DISK, StorageType.DISK}};
 config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+config.setInt(DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY, 10);
 hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
 storagesPerDatanode, capacity);
 dfs = hdfsCluster.getFileSystem();
@@ -1299,19 +1298,28 @@ public class TestStoragePolicySatisfier {
 
 //Queue limit can control the traverse logic to wait for some free
 //entry in queue. After 10 files, traverse control will be on U.
-StoragePolicySatisfier sps = Mockito.mock(StoragePolicySatisfier.class);
-Mockito.when(sps.isRunning()).thenReturn(true);
-Context ctxt = Mockito.mock(Context.class);
-config.setInt(DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY, 10);
-Mockito.when(ctxt.getConf()).thenReturn(config);
-Mockito.when(ctxt.isRunning()).thenReturn(true);
-Mockito.when(ctxt.isInSafeMode()).thenReturn(false);
-Mockito.when(ctxt.isFileExist(Mockito.anyLong())).thenReturn(true);
-BlockStorageMovementNeeded movmentNeededQueue =
-new BlockStorageMovementNeeded(ctxt);
+StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
+Context ctxt = new IntraSPSNameNodeContext(hdfsCluster.getNamesystem(),
+hdfsCluster.getNamesystem().getBlockManager(), sps) {
+  @Override
+  public boolean isInSafeMode() {
+return false;
+  }
+
+  @Override
+  public boolean isRunning() {
+return true;
+  }
+};
+
+FileIdCollector fileIDCollector =
+new IntraSPSNameNodeFileIdCollector(fsDir, sps);
+sps.init(ctxt, fileIDCollector, null);
+sps.getStorageMovementQueue().activate();
+
 INode rootINode = fsDir.getINode("/root");
-movmentNeededQueue.addToPendingDirQueue(rootINode.getId());
-movmentNeededQueue.init(fsDir);
+hdfsCluster.getNamesystem().getBlockManager()
+.addSPSPathId(rootINode.getId());
 
 //Wait for thread to reach U.
 Thread.sleep(1000);
@@ -1321,7 +1329,7 @@ public class TestStoragePolicySatisfier {
 // Remove 10 element and make queue free, So other traversing will start.
 for (int i = 0; i < 10; i++) {
   String path = expectedTraverseOrder.remove(0);
-  long trackId = movmentNeededQueue.get().getTrackId();
+  long trackId = sps.getStorageMovementQueue().get().getFileId();
   INode inode = fsDir.getInode(trackId);
   assertTrue("Failed to traverse tree, expected " + path + " but got "
   + inode.getFullPathName(), path.equals(inode.getFullPathName()));
@@ -1332,7 +1340,7 @@ public class TestStoragePolicySatisfier {
 // Check other element traversed in order and R,S should not be added in
 // queue which we already removed from expected list
 for (String path : expectedTraverseOrder) {
-  long trackId = movmentNeededQueue.get().getTrackId();
+  long trackId = sps.getStorageMovementQueue().get().getFileId();
   INode inode = fsDir.getInode(trackId);
   assertTrue("Failed to traverse tree, expected " + path + " but got "
   + inode.getFullPathName(), path.equals(inode.getFullPathName()));
@@ -1352,6 +1360,7 @@ public class 

[14/50] [abbrv] hadoop git commit: HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d1e4913
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d1e4913
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d1e4913

Branch: refs/heads/HDFS-10285
Commit: 8d1e491350024ef9d5f920c477529ef8bd357fdc
Parents: ecae2af
Author: Uma Maheswara Rao G 
Authored: Mon Jul 17 10:24:06 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:35:50 2018 +0530

--
 .../hdfs/server/blockmanagement/BlockManager.java   |  2 +-
 .../server/datanode/StoragePolicySatisfyWorker.java |  6 +++---
 .../hdfs/server/namenode/StoragePolicySatisfier.java|  6 +++---
 .../hadoop/hdfs/server/protocol/DatanodeProtocol.java   |  5 ++---
 .../org/apache/hadoop/hdfs/server/mover/TestMover.java  |  7 ---
 .../server/namenode/TestStoragePolicySatisfier.java | 12 ++--
 6 files changed, 19 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d1e4913/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ddf3f6c..5db2c3a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -427,7 +427,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   private final BlockIdManager blockIdManager;
 
-  /** For satisfying block storage policies */
+  /** For satisfying block storage policies. */
   private final StoragePolicySatisfier sps;
   private final BlockStorageMovementNeeded storageMovementNeeded =
   new BlockStorageMovementNeeded();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d1e4913/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index f4f97dd..196cd58 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -329,7 +329,7 @@ public class StoragePolicySatisfyWorker {
   /**
* Block movement status code.
*/
-  public static enum BlockMovementStatus {
+  public enum BlockMovementStatus {
 /** Success. */
 DN_BLK_STORAGE_MOVEMENT_SUCCESS(0),
 /**
@@ -343,7 +343,7 @@ public class StoragePolicySatisfyWorker {
 
 private final int code;
 
-private BlockMovementStatus(int code) {
+BlockMovementStatus(int code) {
   this.code = code;
 }
 
@@ -365,7 +365,7 @@ public class StoragePolicySatisfyWorker {
 private final DatanodeInfo target;
 private final BlockMovementStatus status;
 
-public BlockMovementResult(long trackId, long blockId,
+BlockMovementResult(long trackId, long blockId,
 DatanodeInfo target, BlockMovementStatus status) {
   this.trackId = trackId;
   this.blockId = blockId;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d1e4913/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 00b4cd0..af3b7f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -714,10 +714,10 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   private static class StorageTypeNodePair {
-public StorageType storageType = null;
-public DatanodeDescriptor dn = null;
+private StorageType storageType = null;
+private 

[21/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block 
storage movements. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9202cac4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9202cac4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9202cac4

Branch: refs/heads/HDFS-10285
Commit: 9202cac4a8ce7bb9791a71e35b340f20d7cbb19d
Parents: 7c076d1
Author: Uma Maheswara Rao G 
Authored: Thu Oct 12 17:17:51 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:43:52 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../DatanodeProtocolClientSideTranslatorPB.java |  12 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 150 +++-
 .../blockmanagement/DatanodeDescriptor.java |  50 ++-
 .../server/blockmanagement/DatanodeManager.java | 104 --
 .../hdfs/server/datanode/BPOfferService.java|   3 +-
 .../hdfs/server/datanode/BPServiceActor.java|  33 +-
 .../datanode/BlockStorageMovementTracker.java   |  80 ++---
 .../datanode/StoragePolicySatisfyWorker.java| 214 
 .../BlockStorageMovementAttemptedItems.java | 299 
 .../BlockStorageMovementInfosBatch.java |  61 
 .../hdfs/server/namenode/FSNamesystem.java  |  11 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +-
 .../server/namenode/StoragePolicySatisfier.java | 343 ++-
 .../protocol/BlockStorageMovementCommand.java   |  99 ++
 .../BlocksStorageMoveAttemptFinished.java   |  48 +++
 .../protocol/BlocksStorageMovementResult.java   |  74 
 .../hdfs/server/protocol/DatanodeProtocol.java  |   5 +-
 .../src/main/proto/DatanodeProtocol.proto   |  30 +-
 .../src/main/resources/hdfs-default.xml |  21 +-
 .../src/site/markdown/ArchivalStorage.md|   6 +-
 .../TestNameNodePrunesMissingStorages.java  |   5 +-
 .../datanode/InternalDataNodeTestUtils.java |   4 +-
 .../server/datanode/TestBPOfferService.java |   4 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/datanode/TestDataNodeLifeline.java   |   6 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 .../server/datanode/TestFsDatasetCache.java |   4 +-
 .../TestStoragePolicySatisfyWorker.java |  52 ++-
 .../hdfs/server/datanode/TestStorageReport.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   6 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   4 +-
 .../TestBlockStorageMovementAttemptedItems.java | 145 
 .../hdfs/server/namenode/TestDeadDatanode.java  |   4 +-
 .../namenode/TestStoragePolicySatisfier.java| 115 ++-
 ...stStoragePolicySatisfierWithStripedFile.java |  20 +-
 37 files changed, 908 insertions(+), 1135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9202cac4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c8fa40c..4a94220 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -623,11 +623,15 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =
-  5 * 60 * 1000;
+  1 * 60 * 1000;
   public static final String DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.self.retry.timeout.millis";
   public static final int DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_DEFAULT =
-  20 * 60 * 1000;
+  5 * 60 * 1000;
+  public static final String DFS_STORAGE_POLICY_SATISFIER_SHARE_EQUAL_REPLICA_MAX_STREAMS_KEY =
+  "dfs.storage.policy.satisfier.low.max-streams.preference";
+  public static final boolean DFS_STORAGE_POLICY_SATISFIER_SHARE_EQUAL_REPLICA_MAX_STREAMS_DEFAULT =
+  false;
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;

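For reference, the keys touched above can be overridden in hdfs-site.xml. A
minimal sketch, assuming purely illustrative values (after this change the
defaults are 1 minute, 5 minutes, and false respectively):

  <property>
    <name>dfs.storage.policy.satisfier.recheck.timeout.millis</name>
    <value>60000</value>
  </property>
  <property>
    <name>dfs.storage.policy.satisfier.self.retry.timeout.millis</name>
    <value>300000</value>
  </property>
  <property>
    <name>dfs.storage.policy.satisfier.low.max-streams.preference</name>
    <value>false</value>
  </property>
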
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9202cac4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

[02/50] [abbrv] hadoop git commit: HDFS-11334: [SPS]: NN switch and rescheduling movements can lead to have more than one coordinator for same file blocks. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-11334: [SPS]: NN switch and rescheduling movements can lead to have more 
than one coordinator for same file blocks. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d3e8acf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d3e8acf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d3e8acf

Branch: refs/heads/HDFS-10285
Commit: 2d3e8acfbb05acddd8fd4a82324e0d28824bb24e
Parents: 85fc713
Author: Uma Maheswara Rao G 
Authored: Tue Apr 18 15:23:58 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:34:46 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   6 +
 .../server/blockmanagement/DatanodeManager.java |  12 ++
 .../hdfs/server/datanode/BPServiceActor.java|   4 +-
 .../datanode/BlockStorageMovementTracker.java   |  37 +++-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  12 +-
 .../datanode/StoragePolicySatisfyWorker.java|  95 +--
 .../BlockStorageMovementAttemptedItems.java |  80 ++---
 .../server/namenode/StoragePolicySatisfier.java |  15 +-
 .../protocol/BlocksStorageMovementResult.java   |   6 +-
 .../src/main/proto/DatanodeProtocol.proto   |   1 +
 .../TestStoragePolicySatisfyWorker.java |  68 
 .../TestStoragePolicySatisfierWithHA.java   | 170 +--
 13 files changed, 413 insertions(+), 95 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d3e8acf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 00152cc..b5341a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -619,7 +619,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.self.retry.timeout.millis";
   public static final int DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_DEFAULT =
-  30 * 60 * 1000;
+  20 * 60 * 1000;
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d3e8acf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 0c03608..996b986 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -985,6 +985,9 @@ public class PBHelper {
   case FAILURE:
 status = Status.FAILURE;
 break;
+  case IN_PROGRESS:
+status = Status.IN_PROGRESS;
+break;
   default:
 throw new AssertionError("Unknown status: " + resultProto.getStatus());
   }
@@ -1011,6 +1014,9 @@ public class PBHelper {
   case FAILURE:
 status = BlocksStorageMovementResultProto.Status.FAILURE;
 break;
+  case IN_PROGRESS:
+status = BlocksStorageMovementResultProto.Status.IN_PROGRESS;
+break;
   default:
 throw new AssertionError("Unknown status: " + report.getStatus());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d3e8acf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index da340a8..2d7c80e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1091,6 +1091,18 @@ public class DatanodeManager {
   

[45/50] [abbrv] hadoop git commit: HDFS-13110: [SPS]: Reduce the number of APIs in NamenodeProtocol used by external satisfier. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-13110: [SPS]: Reduce the number of APIs in NamenodeProtocol used by 
external satisfier. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8947d989
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8947d989
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8947d989

Branch: refs/heads/HDFS-10285
Commit: 8947d989fb9d2139f8239864117aa4aa10b01df7
Parents: f26fca8
Author: Rakesh Radhakrishnan 
Authored: Fri Feb 16 17:01:38 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 09:01:08 2018 +0530

--
 .../NamenodeProtocolServerSideTranslatorPB.java |  46 +
 .../NamenodeProtocolTranslatorPB.java   |  42 +
 .../hdfs/server/namenode/FSTreeTraverser.java   |   2 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  32 +---
 .../server/namenode/ReencryptionHandler.java|   2 +-
 .../sps/BlockStorageMovementAttemptedItems.java |  42 +++--
 .../sps/BlockStorageMovementNeeded.java | 119 +++--
 .../hdfs/server/namenode/sps/Context.java   |  55 +++---
 .../hdfs/server/namenode/sps/FileCollector.java |  48 +
 .../server/namenode/sps/FileIdCollector.java|  43 -
 .../namenode/sps/IntraSPSNameNodeContext.java   |  39 ++---
 .../sps/IntraSPSNameNodeFileIdCollector.java|  23 +--
 .../hdfs/server/namenode/sps/ItemInfo.java  |  39 +++--
 .../hdfs/server/namenode/sps/SPSService.java|  32 ++--
 .../namenode/sps/StoragePolicySatisfier.java| 129 +-
 .../sps/StoragePolicySatisfyManager.java|   6 +-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  24 +--
 .../sps/ExternalSPSBlockMoveTaskHandler.java|   4 +-
 .../hdfs/server/sps/ExternalSPSContext.java |  60 +++
 .../server/sps/ExternalSPSFileIDCollector.java  | 174 ---
 .../sps/ExternalSPSFilePathCollector.java   | 172 ++
 .../sps/ExternalStoragePolicySatisfier.java |   7 +-
 .../src/main/proto/NamenodeProtocol.proto   |  27 +--
 .../TestBlockStorageMovementAttemptedItems.java |  27 ++-
 .../sps/TestStoragePolicySatisfier.java |  52 +++---
 ...stStoragePolicySatisfierWithStripedFile.java |  15 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 148 +++-
 27 files changed, 701 insertions(+), 708 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8947d989/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index 25eafdf..ed176cc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -35,16 +35,12 @@ import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksReq
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksResponseProto;
 import 

[50/50] [abbrv] hadoop git commit: HDFS-13381 : [SPS]: Use DFSUtilClient#makePathFromFileId() to prepare satisfier file path. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-13381 : [SPS]: Use DFSUtilClient#makePathFromFileId() to prepare satisfier 
file path. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e2cb0ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e2cb0ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e2cb0ce

Branch: refs/heads/HDFS-10285
Commit: 9e2cb0cefbce9f9b33fa688369f8792316c2213d
Parents: 14e5b64
Author: Uma Maheswara Rao G 
Authored: Mon Jul 2 17:22:00 2018 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 09:01:26 2018 +0530

--
 .../NamenodeProtocolServerSideTranslatorPB.java |  2 +-
 .../NamenodeProtocolTranslatorPB.java   |  2 +-
 .../server/blockmanagement/BlockManager.java|  2 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  8 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  9 ---
 .../sps/BlockStorageMovementAttemptedItems.java | 72 +++--
 .../sps/BlockStorageMovementNeeded.java | 61 ++
 .../hdfs/server/namenode/sps/Context.java   | 45 ---
 .../namenode/sps/DatanodeCacheManager.java  |  4 +-
 .../hdfs/server/namenode/sps/FileCollector.java | 13 +--
 .../namenode/sps/IntraSPSNameNodeContext.java   | 54 +
 .../sps/IntraSPSNameNodeFileIdCollector.java| 14 ++--
 .../hdfs/server/namenode/sps/ItemInfo.java  | 34 
 .../hdfs/server/namenode/sps/SPSService.java| 31 +++
 .../namenode/sps/StoragePolicySatisfier.java| 61 +-
 .../sps/StoragePolicySatisfyManager.java| 20 ++---
 .../hdfs/server/protocol/NamenodeProtocol.java  |  2 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java|  4 +-
 .../hdfs/server/sps/ExternalSPSContext.java | 85 
 .../sps/ExternalSPSFilePathCollector.java   | 36 +
 .../sps/ExternalStoragePolicySatisfier.java | 30 +--
 .../src/main/proto/NamenodeProtocol.proto   |  2 +-
 .../TestBlockStorageMovementAttemptedItems.java | 16 ++--
 .../sps/TestStoragePolicySatisfier.java | 66 +--
 ...stStoragePolicySatisfierWithStripedFile.java | 41 --
 .../sps/TestExternalStoragePolicySatisfier.java | 35 +++-
 27 files changed, 346 insertions(+), 414 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e2cb0ce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index e4283c6..d9367fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -265,7 +265,7 @@ public class NamenodeProtocolServerSideTranslatorPB implements
   RpcController controller, GetNextSPSPathRequestProto request)
   throws ServiceException {
 try {
-  String nextSPSPath = impl.getNextSPSPath();
+  Long nextSPSPath = impl.getNextSPSPath();
   if (nextSPSPath == null) {
 return GetNextSPSPathResponseProto.newBuilder().build();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e2cb0ce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
index 97dee9b..3bd5986 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
@@ -267,7 +267,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
   }
 
   @Override
-  public String getNextSPSPath() throws IOException {
+  public Long getNextSPSPath() throws IOException {
 GetNextSPSPathRequestProto req =
 GetNextSPSPathRequestProto.newBuilder().build();
 try {

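The hunk above switches getNextSPSPath() from String to Long, i.e. the
Namenode now hands out an inode id rather than a path. A hedged sketch of how
a satisfier client can consume it, assuming the usual reserved-inodes
resolution (the loop structure and variable names are illustrative, not the
actual ExternalSPSContext code):

  // Resolve the inode id returned by the Namenode into a usable Path.
  Long inodeId = namenodeProtocol.getNextSPSPath();
  if (inodeId != null) {
    // Builds "/.reserved/.inodes/<id>", which clients can open directly.
    Path spsPath = DFSUtilClient.makePathFromFileId(inodeId);
    // ... schedule block movements for spsPath ...
  }
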
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e2cb0ce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

[11/50] [abbrv] hadoop git commit: HDFS-11264: [SPS]: Double checks to ensure that SPS/Mover are not running together. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-11264: [SPS]: Double checks to ensure that SPS/Mover are not running 
together. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b78e3f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b78e3f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b78e3f3

Branch: refs/heads/HDFS-10285
Commit: 6b78e3f3762ee0b48b5a8c1521fc158887681fde
Parents: 16b58c8
Author: Uma Maheswara Rao G 
Authored: Wed Jul 12 17:56:56 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:35:34 2018 +0530

--
 .../server/namenode/StoragePolicySatisfier.java | 53 +++-
 .../namenode/TestStoragePolicySatisfier.java|  3 +-
 ...stStoragePolicySatisfierWithStripedFile.java |  5 +-
 3 files changed, 34 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b78e3f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 97cbf1b..00b4cd0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -128,6 +128,14 @@ public class StoragePolicySatisfier implements Runnable {
*/
   public synchronized void start(boolean reconfigStart) {
 isRunning = true;
+if (checkIfMoverRunning()) {
+  isRunning = false;
+  LOG.error(
+  "Stopping StoragePolicySatisfier thread " + "as Mover ID file "
+  + HdfsServerConstants.MOVER_ID_PATH.toString()
+  + " been opened. Maybe a Mover instance is running!");
+  return;
+}
 if (reconfigStart) {
   LOG.info("Starting StoragePolicySatisfier, as admin requested to "
   + "activate it.");
@@ -211,20 +219,6 @@ public class StoragePolicySatisfier implements Runnable {
 
   @Override
   public void run() {
-boolean isMoverRunning = !checkIfMoverRunning();
-synchronized (this) {
-  isRunning = isMoverRunning;
-  if (!isRunning) {
-// Stopping monitor thread and clearing queues as well
-this.clearQueues();
-this.storageMovementsMonitor.stopGracefully();
-LOG.error(
-"Stopping StoragePolicySatisfier thread " + "as Mover ID file "
-+ HdfsServerConstants.MOVER_ID_PATH.toString()
-+ " been opened. Maybe a Mover instance is running!");
-return;
-  }
-}
 while (namesystem.isRunning() && isRunning) {
   try {
 if (!namesystem.isInSafeMode()) {
@@ -274,25 +268,34 @@ public class StoragePolicySatisfier implements Runnable {
 // we want to check block movements.
 Thread.sleep(3000);
   } catch (Throwable t) {
-synchronized (this) {
+handleException(t);
+  }
+}
+  }
+
+  private void handleException(Throwable t) {
+// double check to avoid entering into synchronized block.
+if (isRunning) {
+  synchronized (this) {
+if (isRunning) {
   isRunning = false;
   // Stopping monitor thread and clearing queues as well
   this.clearQueues();
   this.storageMovementsMonitor.stopGracefully();
-}
-if (!namesystem.isRunning()) {
-  LOG.info("Stopping StoragePolicySatisfier.");
-  if (!(t instanceof InterruptedException)) {
-LOG.info("StoragePolicySatisfier received an exception"
-+ " while shutting down.", t);
+  if (!namesystem.isRunning()) {
+LOG.info("Stopping StoragePolicySatisfier.");
+if (!(t instanceof InterruptedException)) {
+  LOG.info("StoragePolicySatisfier received an exception"
+  + " while shutting down.", t);
+}
+return;
   }
-  break;
 }
-LOG.error("StoragePolicySatisfier thread received runtime exception. "
-+ "Stopping Storage policy satisfier work", t);
-break;
   }
 }
+LOG.error("StoragePolicySatisfier thread received runtime exception. "
++ "Stopping Storage policy satisfier work", t);
+return;
   }
 
  private BlocksMovingAnalysisStatus analyseBlocksStorageMovementsAndAssignToDN(

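The handleException() refactor above is a double-checked shutdown: testing
isRunning outside the lock avoids synchronizing on every exception, and the
re-test inside the lock ensures the queues are cleared and the monitor is
stopped exactly once. Reduced to its skeleton (a sketch of the idiom taken
from the hunk above, not the full method):

  if (isRunning) {
    synchronized (this) {
      if (isRunning) {
        isRunning = false;
        // Stop the monitor thread and clear the queues exactly once.
        clearQueues();
        storageMovementsMonitor.stopGracefully();
      }
    }
  }
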

[41/50] [abbrv] hadoop git commit: HDFS-13077. [SPS]: Fix review comments of external storage policy satisfier. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-13077. [SPS]: Fix review comments of external storage policy satisfier. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/196903ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/196903ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/196903ca

Branch: refs/heads/HDFS-10285
Commit: 196903ca7274d695d326093393c1d88704407b31
Parents: c888189
Author: Surendra Singh Lilhore 
Authored: Mon Jan 29 23:59:55 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:50:28 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +-
 .../server/blockmanagement/BlockManager.java|  33 +++-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  15 ++
 .../hdfs/server/namenode/FSNamesystem.java  |  41 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  11 ++
 .../hdfs/server/namenode/sps/SPSPathIds.java|   8 +-
 .../namenode/sps/StoragePolicySatisfier.java|   6 +-
 .../hdfs/server/sps/ExternalSPSContext.java |   4 +
 .../sps/ExternalStoragePolicySatisfier.java |  30 ++-
 .../sps/TestStoragePolicySatisfier.java |   7 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 195 ++-
 11 files changed, 323 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/196903ca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0d93ff3..c79bf60 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -608,7 +608,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_MOVER_MAX_NO_MOVE_INTERVAL_KEY = "dfs.mover.max-no-move-interval";
   public static final int DFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute
 
-  // SPS related configurations
+  // StoragePolicySatisfier (SPS) related configurations
   public static final String  DFS_STORAGE_POLICY_SATISFIER_MODE_KEY =
   "dfs.storage.policy.satisfier.mode";
   public static final String DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT =
@@ -637,6 +637,18 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "dfs.storage.policy.satisfier.low.max-streams.preference";
   public static final boolean DFS_STORAGE_POLICY_SATISFIER_LOW_MAX_STREAMS_PREFERENCE_DEFAULT =
   true;
+  public static final String DFS_SPS_MAX_OUTSTANDING_PATHS_KEY =
+  "dfs.storage.policy.satisfier.max.outstanding.paths";
+  public static final int DFS_SPS_MAX_OUTSTANDING_PATHS_DEFAULT = 1;
+
+  // SPS keytab configurations, by default it is disabled.
+  public static final String  DFS_SPS_ADDRESS_KEY =
+  "dfs.storage.policy.satisfier.address";
+  public static final String  DFS_SPS_ADDRESS_DEFAULT = "0.0.0.0:0";
+  public static final String  DFS_SPS_KEYTAB_FILE_KEY =
+  "dfs.storage.policy.satisfier.keytab.file";
+  public static final String  DFS_SPS_KERBEROS_PRINCIPAL_KEY =
+  "dfs.storage.policy.satisfier.kerberos.principal";
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;

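The new keys above wire up the external satisfier's RPC address and its
Kerberos login. A hedged hdfs-site.xml sketch, assuming placeholder keytab,
principal, and limit values rather than recommended settings:

  <property>
    <name>dfs.storage.policy.satisfier.max.outstanding.paths</name>
    <value>10000</value>
  </property>
  <property>
    <name>dfs.storage.policy.satisfier.address</name>
    <value>0.0.0.0:0</value>
  </property>
  <property>
    <name>dfs.storage.policy.satisfier.keytab.file</name>
    <value>/etc/security/keytabs/sps.service.keytab</value>
  </property>
  <property>
    <name>dfs.storage.policy.satisfier.kerberos.principal</name>
    <value>sps/_HOST@EXAMPLE.COM</value>
  </property>
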
http://git-wip-us.apache.org/repos/asf/hadoop/blob/196903ca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f348a33..00a91a9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -439,6 +439,7 @@ public class BlockManager implements BlockStatsMXBean {
   private final boolean storagePolicyEnabled;
   private StoragePolicySatisfierMode spsMode;
   private SPSPathIds spsPaths;
+  private final int spsOutstandingPathsLimit;
 
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
@@ -478,14 +479,16 @@ public class BlockManager implements BlockStatsMXBean {
 

[48/50] [abbrv] hadoop git commit: HDFS-13165: [SPS]: Collects successfully moved block details via IBR. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-13165: [SPS]: Collects successfully moved block details via IBR. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14e5b64f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14e5b64f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14e5b64f

Branch: refs/heads/HDFS-10285
Commit: 14e5b64f1a456e416c3425c19bbde1b26d7a2dbc
Parents: 60861a9
Author: Rakesh Radhakrishnan 
Authored: Sun Apr 29 11:06:59 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 09:01:20 2018 +0530

--
 .../DatanodeProtocolClientSideTranslatorPB.java |  11 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  25 ---
 .../server/blockmanagement/BlockManager.java|  86 +-
 .../sps/BlockMovementAttemptFinished.java   |  24 ++-
 .../common/sps/BlockStorageMovementTracker.java | 109 +---
 .../sps/BlocksMovementsStatusHandler.java   |  70 +---
 .../hdfs/server/datanode/BPServiceActor.java|  14 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   7 +-
 .../datanode/StoragePolicySatisfyWorker.java|  48 ++
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  13 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   8 +-
 .../hdfs/server/namenode/FSDirectory.java   |   5 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  30 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  19 ++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  46 +++--
 .../sps/BlockStorageMovementAttemptedItems.java | 167 +--
 .../hdfs/server/namenode/sps/SPSService.java|  19 ++-
 .../namenode/sps/StoragePolicySatisfier.java| 154 +++--
 .../hdfs/server/protocol/DatanodeProtocol.java  |   4 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java|  32 ++--
 .../sps/ExternalStoragePolicySatisfier.java |   3 +-
 .../src/main/proto/DatanodeProtocol.proto   |   9 -
 .../src/main/resources/hdfs-default.xml |  41 +
 .../TestNameNodePrunesMissingStorages.java  |   4 +-
 .../datanode/InternalDataNodeTestUtils.java |   4 +-
 .../SimpleBlocksMovementsStatusHandler.java |  88 ++
 .../server/datanode/TestBPOfferService.java |  12 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/datanode/TestDataNodeLifeline.java   |   7 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 .../server/datanode/TestFsDatasetCache.java |   4 +-
 .../TestStoragePolicySatisfyWorker.java |  76 +
 .../hdfs/server/datanode/TestStorageReport.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   9 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   4 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   5 +-
 .../namenode/TestNameNodeReconfigure.java   |  17 +-
 .../TestBlockStorageMovementAttemptedItems.java |  88 ++
 .../sps/TestStoragePolicySatisfier.java |  73 ++--
 ...stStoragePolicySatisfierWithStripedFile.java |  40 +++--
 .../sps/TestExternalStoragePolicySatisfier.java |  44 ++---
 42 files changed, 776 insertions(+), 659 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14e5b64f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index dcc0705..e4125dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlock
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -139,8 +138,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
   VolumeFailureSummary volumeFailureSummary,
   boolean requestFullBlockReportLease,
   @Nonnull 

[26/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
new file mode 100644
index 000..c1a2b8b
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -0,0 +1,580 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Tests that StoragePolicySatisfier daemon is able to check the striped blocks
+ * to be moved and find the expected target locations in order to satisfy the
+ * storage policy.
+ */
+public class TestStoragePolicySatisfierWithStripedFile {
+
+  private static final Logger LOG = LoggerFactory
+  .getLogger(TestStoragePolicySatisfierWithStripedFile.class);
+
+  private final int stripesPerBlock = 2;
+
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlocks;
+  private int parityBlocks;
+  private int cellSize;
+  private int defaultStripeBlockSize;
+
+  private ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
+  /**
+   * Initialize erasure coding policy.
+   */
+  @Before
+  public void init(){
+ecPolicy = getEcPolicy();
+dataBlocks = ecPolicy.getNumDataUnits();
+parityBlocks = ecPolicy.getNumParityUnits();
+cellSize = ecPolicy.getCellSize();
+defaultStripeBlockSize = cellSize * stripesPerBlock;
+  }
+
+  /**
+   * Tests to verify that all the striped blocks(data + parity blocks) are
+   * moving to satisfy the storage policy.
+   */
+  @Test(timeout = 30)
+  public void testMoverWithFullStripe() throws Exception {
+// start 10 datanodes
+int numOfDatanodes = 10;
+int storagesPerDatanode = 2;
+long capacity = 20 * defaultStripeBlockSize;
+long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
+for (int i = 0; i < numOfDatanodes; i++) {
+  for (int j = 0; j < storagesPerDatanode; j++) {
+capacities[i][j] = capacity;
+  }
+}
+
+final Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+true);
+initConfWithStripe(conf, defaultStripeBlockSize);
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(numOfDatanodes)
+

[30/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
new file mode 100644
index 000..5635621
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -0,0 +1,572 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.ItemInfo;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser.TraverseInfo;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * A class to track the block collection IDs (inode IDs) for which physical
+ * storage movement is needed, as per the namespace and storage reports from
+ * DNs. It scans the pending directories for which storage movement is
+ * required, schedules the block collection IDs for movement, tracks the info
+ * of scheduled items, and removes the SPS xAttr from the file/directory once
+ * movement succeeds.
+ */
+@InterfaceAudience.Private
+public class BlockStorageMovementNeeded {
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(BlockStorageMovementNeeded.class);
+
+  private final Queue<ItemInfo> storageMovementNeeded =
+  new LinkedList<ItemInfo>();
+
+  /**
+   * Map of startId and number of children. The number of children indicates
+   * the number of files pending to satisfy the policy.
+   */
+  private final Map<Long, DirPendingWorkInfo> pendingWorkForDirectory =
+  new HashMap<Long, DirPendingWorkInfo>();
+
+  private final Map<Long, StoragePolicySatisfyPathStatusInfo> spsStatus =
+  new ConcurrentHashMap<>();
+
+  private final Namesystem namesystem;
+
+  // List of pending dir to satisfy the policy
+  private final Queue<Long> spsDirsToBeTraveresed = new LinkedList<>();
+
+  private final StoragePolicySatisfier sps;
+
+  private Daemon inodeIdCollector;
+
+  private final int maxQueuedItem;
+
+  // Amount of time to cache the SUCCESS status of path before turning it to
+  // NOT_AVAILABLE.
+  private static long statusClearanceElapsedTimeMs = 30;
+
+  public BlockStorageMovementNeeded(Namesystem namesystem,
+  StoragePolicySatisfier sps, int queueLimit) {
+this.namesystem = namesystem;
+this.sps = sps;
+this.maxQueuedItem = queueLimit;
+  }
+
+  /**
+   * Add the candidate to the tracking list for which storage movement is
+   * expected, if necessary.
+   *
+   * @param trackInfo
+   *  - track info for satisfying the policy
+   */
+  public synchronized void add(ItemInfo trackInfo) {
+spsStatus.put(trackInfo.getStartId(),
+new StoragePolicySatisfyPathStatusInfo(
+StoragePolicySatisfyPathStatus.IN_PROGRESS));
+storageMovementNeeded.add(trackInfo);
+  }
+
+  /**
+   * Add the itemInfo to the tracking list for which storage movement is
+   * expected, if necessary.
+   * @param startId
+   *- start id
+  

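A hedged usage sketch of the queue above. The ItemInfo constructor arguments
(start directory id, file inode id) are an assumption inferred from the
getStartId() call; the constructor, add(), and get() are the operations
visible in this listing and in the tests later in this series:

  BlockStorageMovementNeeded movementNeeded =
      new BlockStorageMovementNeeded(namesystem, sps, 1000);
  // Queue a file under a start (directory) id; marks the path IN_PROGRESS.
  movementNeeded.add(new ItemInfo(startDirId, fileInodeId));
  // The satisfier thread later drains the queue.
  ItemInfo next = movementNeeded.get();
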
[06/50] [abbrv] hadoop git commit: HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. Contributed by Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35a6fde9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35a6fde9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35a6fde9

Branch: refs/heads/HDFS-10285
Commit: 35a6fde9f0c0819b099b9ecd94ed63068266c60a
Parents: 08a199f
Author: Rakesh Radhakrishnan 
Authored: Mon Jun 5 12:32:41 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:35:08 2018 +0530

--
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 15 ++---
 .../namenode/TestStoragePolicySatisfier.java| 32 
 2 files changed, 42 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35a6fde9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
index 81d337f..bd4e5ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
@@ -51,7 +51,6 @@ final class FSDirSatisfyStoragePolicyOp {
 
 assert fsd.getFSNamesystem().hasWriteLock();
 FSPermissionChecker pc = fsd.getPermissionChecker();
-List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -62,8 +61,11 @@ final class FSDirSatisfyStoragePolicyOp {
 fsd.checkPathAccess(pc, iip, FsAction.WRITE);
   }
   XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-  xAttrs.add(satisfyXAttr);
-  fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+  if (satisfyXAttr != null) {
+List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+xAttrs.add(satisfyXAttr);
+fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+  }
 } finally {
   fsd.writeUnlock();
 }
@@ -79,16 +81,19 @@ final class FSDirSatisfyStoragePolicyOp {
 
 // TODO: think about optimization here, label the dir instead
 // of the sub-files of the dir.
-if (inode.isFile()) {
+if (inode.isFile() && inode.asFile().numBlocks() != 0) {
   candidateNodes.add(inode);
 } else if (inode.isDirectory()) {
   for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-if (node.isFile()) {
+if (node.isFile() && node.asFile().numBlocks() != 0) {
   candidateNodes.add(node);
 }
   }
 }
 
+if (candidateNodes.isEmpty()) {
+  return null;
+}
 // If node has satisfy xattr, then stop adding it
 // to satisfy movement queue.
 if (inodeHasSatisfyXAttr(candidateNodes)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35a6fde9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index fa954b8..8e08a1e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -907,6 +907,38 @@ public class TestStoragePolicySatisfier {
 }
   }
 
+  /**
+   * Test SPS with empty file.
+   * 1. Create one empty file.
+   * 2. Call satisfyStoragePolicy for empty file.
+   * 3. SPS should skip this file and xattr should not be added for empty file.
+   */
+  @Test(timeout = 30)
+  public void testSPSWhenFileLengthIsZero() throws Exception {
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  Path filePath = new Path("/zeroSizeFile");
+  DFSTestUtil.createFile(fs, filePath, 0, (short) 1, 0);
+  FSEditLog editlog = cluster.getNameNode().getNamesystem().getEditLog();
+  long lastWrittenTxId = editlog.getLastWrittenTxId();
+  

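The message truncates the test body at this point. Based on the three steps
in the test's javadoc, the remainder presumably triggers the call and
verifies that no xattr (and hence no edit-log transaction) was added; a
hedged reconstruction, not the verbatim commit:

  fs.satisfyStoragePolicy(filePath);
  // The empty file must be skipped, so the edit log should be untouched.
  Assert.assertEquals("Xattr should not be added for empty file",
      lastWrittenTxId, editlog.getLastWrittenTxId());
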
[24/50] [abbrv] hadoop git commit: HDFS-12790: [SPS]: Rebasing HDFS-10285 branch after HDFS-10467, HDFS-12599 and HDFS-11968 commits. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-12790: [SPS]: Rebasing HDFS-10285 branch after HDFS-10467, HDFS-12599 and 
HDFS-11968 commits. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1b810eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1b810eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1b810eb

Branch: refs/heads/HDFS-10285
Commit: d1b810eba719e3e4c62d0e3f0ff2877412536688
Parents: 6016d28
Author: Rakesh Radhakrishnan 
Authored: Fri Nov 10 10:06:43 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:46:22 2018 +0530

--
 .../federation/router/RouterRpcServer.java  |  19 +++
 .../namenode/TestStoragePolicySatisfier.java|   9 +-
 ...stStoragePolicySatisfierWithStripedFile.java |  21 +--
 .../hdfs/tools/TestStoragePolicyCommands.java   |  57 -
 .../TestStoragePolicySatisfyAdminCommands.java  | 127 +++
 5 files changed, 162 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1b810eb/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 716ebee..e078c49 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -87,6 +87,7 @@ import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
@@ -2513,4 +2514,22 @@ public class RouterRpcServer extends AbstractService
   public FederationRPCMetrics getRPCMetrics() {
 return this.rpcMonitor.getRPCMetrics();
   }
+
+  @Override
+  public void satisfyStoragePolicy(String path) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public boolean isStoragePolicySatisfierRunning() throws IOException {
+checkOperation(OperationCategory.READ, false);
+return false;
+  }
+
+  @Override
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+  String path) throws IOException {
+checkOperation(OperationCategory.READ, false);
+return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
+  }
 }
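
Both router stubs deliberately answer "no SPS here": the satisfier is
reported as not running and every path status comes back NOT_AVAILABLE, so
RBF clients fail soft instead of hitting an unimplemented code path. A
sketch of a client-side guard built on that contract ("dfs" is an
illustrative DistributedFileSystem handle, not part of the patch):

  // Hypothetical guard before queueing SPS work through a Router endpoint.
  if (!dfs.getClient().isStoragePolicySatisfierRunning()) {
    // Always taken against a Router while these stubs are in place.
    throw new UnsupportedOperationException("SPS not available via router");
  }
  dfs.satisfyStoragePolicy(new Path("/data"));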

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1b810eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index f42d911..edd1aca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -61,6 +61,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Assert;
@@ -912,8 +913,6 @@ public class TestStoragePolicySatisfier {
 
 int defaultStripedBlockSize =
 StripedFileTestUtil.getDefaultECPolicy().getCellSize() * 4;
-config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-StripedFileTestUtil.getDefaultECPolicy().getName());
 config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
 config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
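
The removed config lines reflect that erasure-coding policies are no longer
switched on via DFS_NAMENODE_EC_POLICIES_ENABLED_KEY; a minimal sketch of
enabling a policy at runtime instead (assuming a running MiniDFSCluster
handle named "cluster"):

  // Enable the default EC policy per name through the filesystem API.
  DistributedFileSystem dfs = cluster.getFileSystem();
  dfs.enableErasureCodingPolicy(
      StripedFileTestUtil.getDefaultECPolicy().getName());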
 

[29/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
deleted file mode 100644
index 9f733ff..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ /dev/null
@@ -1,1775 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
-
-import com.google.common.base.Supplier;
-
-/**
- * Tests that StoragePolicySatisfier daemon is able to check the blocks to be
- * moved and finding its suggested target locations to move.
- */
-public class TestStoragePolicySatisfier {
-
-  {
-GenericTestUtils.setLogLevel(
-getLogger(FSTreeTraverser.class), Level.DEBUG);
-  }
-
-  private static final String ONE_SSD = "ONE_SSD";
-  private static final String COLD = "COLD";
-  private static final Logger LOG =
-  LoggerFactory.getLogger(TestStoragePolicySatisfier.class);
-  private final Configuration config = new HdfsConfiguration();
-  private StorageType[][] allDiskTypes =
-  new StorageType[][]{{StorageType.DISK, StorageType.DISK},
-  {StorageType.DISK, StorageType.DISK},
-  {StorageType.DISK, StorageType.DISK}};
-  private MiniDFSCluster hdfsCluster = null;
-  final private int numOfDatanodes = 3;
-  

[44/50] [abbrv] hadoop git commit: HDFS-13110: [SPS]: Reduce the number of APIs in NamenodeProtocol used by external satisfier. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8947d989/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
deleted file mode 100644
index ff277ba..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
+++ /dev/null
@@ -1,174 +0,0 @@
-package org.apache.hadoop.hdfs.server.sps;
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.ArrayList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.sps.Context;
-import org.apache.hadoop.hdfs.server.namenode.sps.FileIdCollector;
-import org.apache.hadoop.hdfs.server.namenode.sps.ItemInfo;
-import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class scans the given paths recursively. If a path is a directory, it
- * scans the files under it recursively; if it is a regular file, it simply
- * submits that file for processing.
- */
-@InterfaceAudience.Private
-public class ExternalSPSFileIDCollector implements FileIdCollector {
-  public static final Logger LOG =
-  LoggerFactory.getLogger(ExternalSPSFileIDCollector.class);
-  private Context cxt;
-  private DistributedFileSystem dfs;
-  private SPSService service;
-  private int maxQueueLimitToScan;
-
-  public ExternalSPSFileIDCollector(Context cxt, SPSService service) {
-this.cxt = cxt;
-this.service = service;
-this.maxQueueLimitToScan = service.getConf().getInt(
-DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY,
-DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_DEFAULT);
-try {
-  // TODO: probably we could get this dfs from external context? but this is
-  // too specific to external.
-  dfs = getFS(service.getConf());
-} catch (IOException e) {
-  LOG.error("Unable to get the filesystem. Make sure Namenode running and "
-  + "configured namenode address is correct.", e);
-}
-  }
-
-  private DistributedFileSystem getFS(Configuration conf) throws IOException {
-return (DistributedFileSystem) FileSystem
-.get(FileSystem.getDefaultUri(conf), conf);
-  }
-
-  /**
-   * Recursively scan the given path and add the file info to SPS service for
-   * processing.
-   */
-  private long processPath(long startID, String fullPath) {
-long pendingWorkCount = 0; // to be satisfied file counter
-for (byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;;) {
-  final DirectoryListing children;
-  try {
-children = dfs.getClient().listPaths(fullPath, lastReturnedName, false);
-  } catch (IOException e) {
-LOG.warn("Failed to list directory " + fullPath
-+ ". Ignore the directory and continue.", e);
-return pendingWorkCount;
-  }
-  if (children == null) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("The scanning start dir/sub dir " + fullPath
-  + " does not have childrens.");
-}
-return pendingWorkCount;
-  }
-
-  for (HdfsFileStatus child : children.getPartialListing()) {
-if (child.isFile()) {
-  service.addFileIdToProcess(new ItemInfo(startID, child.getFileId()),
-  false);
-  checkProcessingQueuesFree();
-  pendingWorkCount++; // increment to be satisfied file count
-} else {
-  String fullPathStr = child.getFullName(fullPath);
- 
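
The collector pages through each directory with listPaths, using the last
returned name as a resume cursor. A stripped-down sketch of that pagination
pattern ("dir" and "process" are hypothetical; the rest are the HDFS client
APIs used above):

  byte[] cursor = HdfsFileStatus.EMPTY_NAME;         // start-of-directory marker
  while (true) {
    DirectoryListing batch =
        dfs.getClient().listPaths(dir, cursor, false); // needLocation=false
    if (batch == null) {
      break;                                         // directory vanished or empty
    }
    for (HdfsFileStatus child : batch.getPartialListing()) {
      process(child);                                // hypothetical per-file hook
    }
    if (!batch.hasMore()) {
      break;                                         // no remaining entries
    }
    cursor = batch.getLastName();                    // resume after the last entry
  }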

[35/50] [abbrv] hadoop git commit: HDFS-13025. [SPS]: Implement a mechanism to scan the files for external SPS. Contributed by Uma Maheswara Rao G.

2018-07-04 Thread rakeshr
HDFS-13025. [SPS]: Implement a mechanism to scan the files for external SPS. 
Contributed by Uma Maheswara Rao G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55d0b1d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55d0b1d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55d0b1d9

Branch: refs/heads/HDFS-10285
Commit: 55d0b1d99c62329b569aaac7f5a3f3f247ee539d
Parents: 2412c34
Author: Rakesh Radhakrishnan 
Authored: Tue Jan 23 20:09:26 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:47:03 2018 +0530

--
 .../sps/BlockStorageMovementNeeded.java |  70 +++-
 .../hdfs/server/namenode/sps/Context.java   |   8 +
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   2 +
 .../namenode/sps/IntraSPSNameNodeContext.java   |   7 +
 .../sps/IntraSPSNameNodeFileIdCollector.java|   6 +-
 .../hdfs/server/namenode/sps/SPSService.java|  10 +-
 .../namenode/sps/StoragePolicySatisfier.java|   8 +-
 .../server/sps/ExternalSPSFileIDCollector.java  | 156 +
 .../hadoop/hdfs/server/sps/package-info.java|  28 ++
 .../sps/TestStoragePolicySatisfier.java | 323 ++-
 .../sps/TestExternalStoragePolicySatisfier.java | 108 +++
 11 files changed, 556 insertions(+), 170 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55d0b1d9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
index 39a0051..b141502 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -97,23 +97,53 @@ public class BlockStorageMovementNeeded {
   }
 
   /**
-   * Add the itemInfo to tracking list for which storage movement
-   * expected if necessary.
+   * Add the given itemInfo list to the tracking list for which storage
+   * movement is expected, if necessary.
+   *
* @param startId
-   *- start id
+   *  - start id
* @param itemInfoList
-   *- List of child in the directory
+   *  - List of children in the directory
+   * @param scanCompleted
+   *  - Indicates whether the start id directory has no more elements to
+   *  scan.
*/
   @VisibleForTesting
-  public synchronized void addAll(long startId,
-  List<ItemInfo> itemInfoList, boolean scanCompleted) {
+  public synchronized void addAll(long startId, List<ItemInfo> itemInfoList,
+  boolean scanCompleted) {
 storageMovementNeeded.addAll(itemInfoList);
+updatePendingDirScanStats(startId, itemInfoList.size(), scanCompleted);
+  }
+
+  /**
+   * Add the given itemInfo to the tracking list for which storage movement is
+   * expected, if necessary.
+   *
+   * @param itemInfo
+   *  - child item info to be tracked
+   * @param scanCompleted
+   *  - Indicates whether the ItemInfo start id directory has no more
+   *  elements to scan.
+   */
+  @VisibleForTesting
+  public synchronized void add(ItemInfo itemInfo, boolean scanCompleted) {
+storageMovementNeeded.add(itemInfo);
+// The sps start id refers to a file itself, so there are no pending dir
+// stats to update.
+if (itemInfo.getStartId() == itemInfo.getFileId()) {
+  return;
+}
+updatePendingDirScanStats(itemInfo.getStartId(), 1, scanCompleted);
+  }
+
+  private void updatePendingDirScanStats(long startId, int numScannedFiles,
+  boolean scanCompleted) {
 DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startId);
 if (pendingWork == null) {
   pendingWork = new DirPendingWorkInfo();
   pendingWorkForDirectory.put(startId, pendingWork);
 }
-pendingWork.addPendingWorkCount(itemInfoList.size());
+pendingWork.addPendingWorkCount(numScannedFiles);
 if (scanCompleted) {
   pendingWork.markScanCompleted();
 }
@@ -250,13 +280,15 @@ public class BlockStorageMovementNeeded {
 
 @Override
 public void run() {
-  LOG.info("Starting FileInodeIdCollector!.");
+  LOG.info("Starting SPSPathIdProcessor!.");
   long lastStatusCleanTime = 0;
+  Long startINodeId = null;
   while (ctxt.isRunning()) {
-LOG.info("Running FileInodeIdCollector!.");
 try {
   if (!ctxt.isInSafeMode()) {
-Long startINodeId = ctxt.getNextSPSPathId();
+if (startINodeId 
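
The fragment breaks off here, but keeping startINodeId outside the loop
suggests the processor holds on to an already-fetched path id instead of
losing it when safe mode or a transient failure interrupts processing. A
sketch of that retry shape, assuming only the context calls visible above
plus a hypothetical processPath step:

  Long startINodeId = null;
  while (ctxt.isRunning()) {
    try {
      if (!ctxt.isInSafeMode()) {
        if (startINodeId == null) {
          startINodeId = ctxt.getNextSPSPathId(); // fetch work only when idle
        }
        if (startINodeId != null) {
          processPath(startINodeId);              // hypothetical scan step
          startINodeId = null;                    // done; allow the next fetch
        }
      }
      Thread.sleep(3000);                         // illustrative poll pause
    } catch (Throwable t) {
      // keep startINodeId so the same path is retried on the next pass
    }
  }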

[36/50] [abbrv] hadoop git commit: HDFS-13033: [SPS]: Implement a mechanism to do file block movements for external SPS. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-13033: [SPS]: Implement a mechanism to do file block movements for 
external SPS. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49ee352c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49ee352c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49ee352c

Branch: refs/heads/HDFS-10285
Commit: 49ee352cc88169e8abe559c4a3c44f37c80d7f04
Parents: 55d0b1d
Author: Uma Maheswara Rao G 
Authored: Tue Jan 23 16:19:46 2018 -0800
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:47:12 2018 +0530

--
 .../hdfs/server/balancer/NameNodeConnector.java |   8 +
 .../hdfs/server/common/sps/BlockDispatcher.java | 186 +
 .../sps/BlockMovementAttemptFinished.java   |  80 ++
 .../server/common/sps/BlockMovementStatus.java  |  53 
 .../common/sps/BlockStorageMovementTracker.java | 184 +
 .../sps/BlocksMovementsStatusHandler.java   |  95 +++
 .../hdfs/server/common/sps/package-info.java|  27 ++
 .../datanode/BlockStorageMovementTracker.java   | 186 -
 .../datanode/StoragePolicySatisfyWorker.java| 271 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |   4 +-
 .../namenode/sps/BlockMoveTaskHandler.java  |   3 +-
 .../sps/BlockStorageMovementAttemptedItems.java |  12 +-
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   3 +-
 .../hdfs/server/namenode/sps/SPSService.java|  14 +-
 .../namenode/sps/StoragePolicySatisfier.java|  30 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java| 233 
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../sps/TestStoragePolicySatisfier.java |   6 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  69 -
 19 files changed, 997 insertions(+), 469 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49ee352c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index b0dd779..6bfbbb3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -269,6 +269,14 @@ public class NameNodeConnector implements Closeable {
 }
   }
 
+  /**
+   * Returns the fallbackToSimpleAuth flag, which indicates whether a secure
+   * client has fallen back to simple authentication during RPC calls.
+   */
+  public AtomicBoolean getFallbackToSimpleAuth() {
+return fallbackToSimpleAuth;
+  }
+
   @Override
   public void close() {
 keyManager.close();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49ee352c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
new file mode 100644
index 000..f87fcae
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common.sps;
+
+import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import 

[49/50] [abbrv] hadoop git commit: HDFS-13381 : [SPS]: Use DFSUtilClient#makePathFromFileId() to prepare satisfier file path. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e2cb0ce/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
index b05717a..ec5307b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -108,8 +108,6 @@ public class TestStoragePolicySatisfier {
   public static final long CAPACITY = 2 * 256 * 1024 * 1024;
   public static final String FILE = "/testMoveToSatisfyStoragePolicy";
   public static final int DEFAULT_BLOCK_SIZE = 1024;
-  private ExternalBlockMovementListener blkMoveListener =
-  new ExternalBlockMovementListener();
 
   /**
* Sets hdfs cluster.
@@ -1282,8 +1280,8 @@ public class TestStoragePolicySatisfier {
 
 //Queue limit can control the traverse logic to wait for some free
 //entry in queue. After 10 files, traverse control will be on U.
-StoragePolicySatisfier<Long> sps = new StoragePolicySatisfier<Long>(config);
-Context<Long> ctxt = new IntraSPSNameNodeContext(
+StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
+Context ctxt = new IntraSPSNameNodeContext(
 hdfsCluster.getNamesystem(),
 hdfsCluster.getNamesystem().getBlockManager(), sps) {
   @Override
@@ -1297,8 +1295,7 @@ public class TestStoragePolicySatisfier {
   }
 };
 
-FileCollector<Long> fileIDCollector = createFileIdCollector(sps, ctxt);
-sps.init(ctxt, fileIDCollector, null, null);
+sps.init(ctxt);
 sps.getStorageMovementQueue().activate();
 
 INode rootINode = fsDir.getINode("/root");
@@ -1314,13 +1311,6 @@ public class TestStoragePolicySatisfier {
 dfs.delete(new Path("/root"), true);
   }
 
-  public FileCollector<Long> createFileIdCollector(
-  StoragePolicySatisfier<Long> sps, Context<Long> ctxt) {
-FileCollector<Long> fileIDCollector = new IntraSPSNameNodeFileIdCollector(
-hdfsCluster.getNamesystem().getFSDirectory(), sps);
-return fileIDCollector;
-  }
-
   /**
*  Test traverse when root parent got deleted.
*  1. Delete L when traversing Q
@@ -1351,8 +1341,8 @@ public class TestStoragePolicySatisfier {
 
 // Queue limit can control the traverse logic to wait for some free
 // entry in queue. After 10 files, traverse control will be on U.
-StoragePolicySatisfier<Long> sps = new StoragePolicySatisfier<Long>(config);
-Context<Long> ctxt = new IntraSPSNameNodeContext(
+StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
+Context ctxt = new IntraSPSNameNodeContext(
 hdfsCluster.getNamesystem(),
 hdfsCluster.getNamesystem().getBlockManager(), sps) {
   @Override
@@ -1365,8 +1355,7 @@ public class TestStoragePolicySatisfier {
 return true;
   }
 };
-FileCollector<Long> fileIDCollector = createFileIdCollector(sps, ctxt);
-sps.init(ctxt, fileIDCollector, null, null);
+sps.init(ctxt);
 sps.getStorageMovementQueue().activate();
 
 INode rootINode = fsDir.getINode("/root");
@@ -1383,12 +1372,12 @@ public class TestStoragePolicySatisfier {
   }
 
  private void assertTraversal(List<String> expectedTraverseOrder,
-  FSDirectory fsDir, StoragePolicySatisfier<Long> sps)
+  FSDirectory fsDir, StoragePolicySatisfier sps)
   throws InterruptedException {
 // Remove 10 element and make queue free, So other traversing will start.
 for (int i = 0; i < 10; i++) {
   String path = expectedTraverseOrder.remove(0);
-  ItemInfo<Long> itemInfo = sps.getStorageMovementQueue().get();
+  ItemInfo itemInfo = sps.getStorageMovementQueue().get();
   if (itemInfo == null) {
 continue;
   }
@@ -1403,7 +1392,7 @@ public class TestStoragePolicySatisfier {
 // Check other element traversed in order and E, M, U, R, S should not be
 // added in queue which we already removed from expected list
 for (String path : expectedTraverseOrder) {
-  ItemInfo<Long> itemInfo = sps.getStorageMovementQueue().get();
+  ItemInfo itemInfo = sps.getStorageMovementQueue().get();
   if (itemInfo == null) {
 continue;
   }
@@ -1717,17 +1706,17 @@ public class TestStoragePolicySatisfier {
   public void waitForAttemptedItems(long expectedBlkMovAttemptedCount,
   int timeout) throws TimeoutException, InterruptedException {
 BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();
-final StoragePolicySatisfier<Long> sps =
-(StoragePolicySatisfier<Long>) blockManager.getSPSManager()
+final StoragePolicySatisfier sps =
+

[43/50] [abbrv] hadoop git commit: HDFS-13097: [SPS]: Fix the branch review comments(Part1). Contributed by Surendra Singh.

2018-07-04 Thread rakeshr
HDFS-13097: [SPS]: Fix the branch review comments(Part1). Contributed by 
Surendra Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f26fca8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f26fca8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f26fca8b

Branch: refs/heads/HDFS-10285
Commit: f26fca8b118076cc089b21777db3a0d874b2038e
Parents: 196903c
Author: Uma Maheswara Rao G 
Authored: Wed Feb 7 02:28:23 2018 -0800
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 09:01:01 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   4 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|   6 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  14 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |   8 +-
 .../federation/router/RouterRpcServer.java  |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  61 ---
 ...tNamenodeProtocolServerSideTranslatorPB.java |  16 +-
 .../server/blockmanagement/BlockManager.java| 255 +---
 .../blockmanagement/DatanodeDescriptor.java |  33 +-
 .../hdfs/server/common/HdfsServerConstants.java |   2 +-
 .../datanode/StoragePolicySatisfyWorker.java|  15 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   2 +-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  26 +-
 .../server/namenode/FSDirStatAndListingOp.java  |   1 -
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   2 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  46 +--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  30 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  21 +-
 .../sps/BlockStorageMovementNeeded.java |   4 +-
 .../namenode/sps/IntraSPSNameNodeContext.java   |   6 +-
 .../hdfs/server/namenode/sps/SPSPathIds.java|  70 
 .../hdfs/server/namenode/sps/SPSService.java|  10 +-
 .../namenode/sps/StoragePolicySatisfier.java| 137 ---
 .../sps/StoragePolicySatisfyManager.java| 399 +++
 .../sps/ExternalStoragePolicySatisfier.java |   2 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |   2 +-
 .../namenode/TestNameNodeReconfigure.java   |  19 +-
 .../TestPersistentStoragePolicySatisfier.java   |   3 +-
 .../TestStoragePolicySatisfierWithHA.java   |   6 +-
 .../sps/TestStoragePolicySatisfier.java |  35 +-
 ...stStoragePolicySatisfierWithStripedFile.java |   6 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  24 +-
 33 files changed, 665 insertions(+), 604 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f26fca8b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 471ab2c..b6f9bdd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3110,8 +3110,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public boolean isStoragePolicySatisfierRunning() throws IOException {
-return namenode.isStoragePolicySatisfierRunning();
+  public boolean isInternalSatisfierRunning() throws IOException {
+return namenode.isInternalSatisfierRunning();
   }
 
   Tracer getTracer() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f26fca8b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 360fd63..5c51c22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1759,12 +1759,12 @@ public interface ClientProtocol {
   void satisfyStoragePolicy(String path) throws IOException;
 
   /**
-   * Check if StoragePolicySatisfier is running.
-   * @return true if StoragePolicySatisfier is running
+   * Check if internal StoragePolicySatisfier is running.
+   * @return true if internal StoragePolicySatisfier is running
* @throws IOException
*/
   @Idempotent
-  boolean isStoragePolicySatisfierRunning() throws IOException;
+  boolean 

[23/50] [abbrv] hadoop git commit: HDFS-12310: [SPS]: Provide an option to track the status of in progress requests. Contributed by Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-12310: [SPS]: Provide an option to track the status of in progress 
requests. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6016d283
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6016d283
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6016d283

Branch: refs/heads/HDFS-10285
Commit: 6016d283f0f083fd3a72fac0f492bd8262cf56d3
Parents: ce8e901
Author: Rakesh Radhakrishnan 
Authored: Fri Nov 3 08:18:14 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:44:03 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  22 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  21 
 .../hadoop/hdfs/protocol/HdfsConstants.java |  27 +
 .../ClientNamenodeProtocolTranslatorPB.java |  20 
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  33 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  17 ++-
 ...tNamenodeProtocolServerSideTranslatorPB.java |  23 +++-
 .../server/blockmanagement/BlockManager.java|  12 ++
 .../namenode/BlockStorageMovementNeeded.java| 109 +++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  13 ++-
 .../server/namenode/StoragePolicySatisfier.java |   8 ++
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  35 +-
 .../src/site/markdown/ArchivalStorage.md|   3 +-
 .../TestPersistentStoragePolicySatisfier.java   |   2 +-
 .../namenode/TestStoragePolicySatisfier.java|  67 
 .../hdfs/tools/TestStoragePolicyCommands.java   |  18 +++
 16 files changed, 424 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7337aa2..471ab2c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -123,6 +123,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -3169,4 +3170,25 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 checkOpen();
 return new OpenFilesIterator(namenode, tracer, openFilesTypes, path);
   }
+
+  /**
+   * Check the storage policy satisfy status of the path for which
+   * {@link DFSClient#satisfyStoragePolicy(String)} is called.
+   *
+   * @return Storage policy satisfy status.
+   * <ul>
+   * <li>PENDING if path is in queue and not processed for satisfying
+   * the policy.</li>
+   * <li>IN_PROGRESS if satisfying the storage policy for path.</li>
+   * <li>SUCCESS if storage policy satisfied for the path.</li>
+   * <li>NOT_AVAILABLE if
+   * {@link DFSClient#satisfyStoragePolicy(String)} not called for
+   * path or SPS work is already finished.</li>
+   * </ul>
+   * @throws IOException
+   */
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+  String path) throws IOException {
+return namenode.checkStoragePolicySatisfyPathStatus(path);
+  }
 }
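
Since SPS works asynchronously, a caller is expected to poll this status
until the request leaves the queue. A minimal sketch of such a polling loop
(illustrative names; assumes a DistributedFileSystem "dfs" on a cluster with
the satisfier enabled):

  Path file = new Path("/testFile");
  dfs.satisfyStoragePolicy(file);                  // queue the path for SPS
  StoragePolicySatisfyPathStatus status;
  do {
    Thread.sleep(1000);                            // movement happens in background
    status = dfs.getClient()
        .checkStoragePolicySatisfyPathStatus(file.toString());
  } while (status == StoragePolicySatisfyPathStatus.PENDING
      || status == StoragePolicySatisfyPathStatus.IN_PROGRESS);
  // SUCCESS: policy satisfied; NOT_AVAILABLE: never queued or already cleaned up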

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 81d7c91..360fd63 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 

[39/50] [abbrv] hadoop git commit: HDFS-13075. [SPS]: Provide External Context implementation. Contributed by Uma Maheswara Rao G.

2018-07-04 Thread rakeshr
HDFS-13075. [SPS]: Provide External Context implementation. Contributed by Uma 
Maheswara Rao G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3272839
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3272839
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3272839

Branch: refs/heads/HDFS-10285
Commit: e3272839c5587f9a010f792103c53bda8d8c4197
Parents: 96fb185
Author: Surendra Singh Lilhore 
Authored: Sun Jan 28 20:46:56 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:50:15 2018 +0530

--
 .../NamenodeProtocolServerSideTranslatorPB.java |  67 +
 .../NamenodeProtocolTranslatorPB.java   |  58 
 .../hdfs/server/balancer/NameNodeConnector.java |  28 +-
 .../server/blockmanagement/BlockManager.java|  19 ++
 .../server/blockmanagement/DatanodeManager.java |  18 ++
 .../hdfs/server/common/HdfsServerConstants.java |   3 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  53 +++-
 .../sps/BlockStorageMovementNeeded.java |   8 +-
 .../hdfs/server/namenode/sps/Context.java   |   9 +-
 .../namenode/sps/IntraSPSNameNodeContext.java   |  23 +-
 .../namenode/sps/StoragePolicySatisfier.java|  15 +-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  46 +++-
 .../hdfs/server/sps/ExternalSPSContext.java | 271 +++
 .../src/main/proto/NamenodeProtocol.proto   |  57 
 .../sps/TestExternalStoragePolicySatisfier.java |  31 +--
 15 files changed, 652 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3272839/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index 90c2c49..25eafdf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
@@ -33,10 +35,16 @@ import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksReq
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto;
 import 

[47/50] [abbrv] hadoop git commit: HDFS-13165: [SPS]: Collects successfully moved block details via IBR. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/14e5b64f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
index 7580ba9..f5225d2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
@@ -20,13 +20,10 @@ package org.apache.hadoop.hdfs.server.sps;
 
 import java.io.IOException;
 import java.net.Socket;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
@@ -39,7 +36,6 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
@@ -48,15 +44,14 @@ import 
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.balancer.KeyManager;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
+import org.apache.hadoop.hdfs.server.common.sps.BlockDispatcher;
 import org.apache.hadoop.hdfs.server.common.sps.BlockMovementAttemptFinished;
 import org.apache.hadoop.hdfs.server.common.sps.BlockMovementStatus;
 import org.apache.hadoop.hdfs.server.common.sps.BlockStorageMovementTracker;
 import org.apache.hadoop.hdfs.server.common.sps.BlocksMovementsStatusHandler;
-import org.apache.hadoop.hdfs.server.common.sps.BlockDispatcher;
 import org.apache.hadoop.hdfs.server.namenode.sps.BlockMoveTaskHandler;
 import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import 
org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
@@ -105,12 +100,14 @@ public class ExternalSPSBlockMoveTaskHandler implements 
BlockMoveTaskHandler {
 int ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);
 blkDispatcher = new BlockDispatcher(HdfsConstants.READ_TIMEOUT,
 ioFileBufferSize, connectToDnViaHostname);
+
+startMovementTracker();
   }
 
   /**
* Initializes block movement tracker daemon and starts the thread.
*/
-  public void init() {
+  private void startMovementTracker() {
 movementTrackerThread = new Daemon(this.blkMovementTracker);
 movementTrackerThread.setName("BlockStorageMovementTracker");
 movementTrackerThread.start();
@@ -156,24 +153,16 @@ public class ExternalSPSBlockMoveTaskHandler implements 
BlockMoveTaskHandler {
 // dn.incrementBlocksScheduled(blkMovingInfo.getTargetStorageType());
 LOG.debug("Received BlockMovingTask {}", blkMovingInfo);
 BlockMovingTask blockMovingTask = new BlockMovingTask(blkMovingInfo);
-Future<BlockMovementAttemptFinished> moveCallable = mCompletionServ
-.submit(blockMovingTask);
-blkMovementTracker.addBlock(blkMovingInfo.getBlock(), moveCallable);
+mCompletionServ.submit(blockMovingTask);
   }
 
   private class ExternalBlocksMovementsStatusHandler
-  extends BlocksMovementsStatusHandler {
+  implements BlocksMovementsStatusHandler {
 @Override
-public void handle(
-List<BlockMovementAttemptFinished> moveAttemptFinishedBlks) {
-  List<Block> blocks = new ArrayList<>();
-  for (BlockMovementAttemptFinished item : moveAttemptFinishedBlks) {
-blocks.add(item.getBlock());
-  }
-  BlocksStorageMoveAttemptFinished blkAttempted =
-  new BlocksStorageMoveAttemptFinished(
-  blocks.toArray(new Block[blocks.size()]));
-  service.notifyStorageMovementAttemptFinishedBlks(blkAttempted);
+public void handle(BlockMovementAttemptFinished attemptedMove) {
+  service.notifyStorageMovementAttemptFinishedBlk(
+  attemptedMove.getTargetDatanode(), attemptedMove.getTargetType(),
+  attemptedMove.getBlock());
 }
   }
 
@@ -194,6 +183,7 @@ public class 
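
With the reworked interface a handler is notified one attempted move at a
time rather than with a batched list. A minimal alternative implementation
for illustration (hypothetical class; slf4j logger assumed):

  // Hypothetical handler that only logs each finished move attempt.
  class LoggingMovementStatusHandler implements BlocksMovementsStatusHandler {
    private final Logger log =
        LoggerFactory.getLogger(LoggingMovementStatusHandler.class);

    @Override
    public void handle(BlockMovementAttemptFinished attemptedMove) {
      log.info("Finished move attempt: block={}, target={}, storageType={}",
          attemptedMove.getBlock(), attemptedMove.getTargetDatanode(),
          attemptedMove.getTargetType());
    }
  }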

[40/50] [abbrv] hadoop git commit: HDFS-13050: [SPS]: Create start/stop script to start external SPS process. Contributed by Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-13050: [SPS]: Create start/stop script to start external SPS process. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8881895
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8881895
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8881895

Branch: refs/heads/HDFS-10285
Commit: c888189584bdc2eba981be5bcfa002a9ca2df096
Parents: e327283
Author: Rakesh Radhakrishnan 
Authored: Mon Jan 29 03:10:48 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:50:21 2018 +0530

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   5 +
 .../server/blockmanagement/BlockManager.java|   9 ++
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   2 +-
 .../hdfs/server/namenode/sps/Context.java   |   5 -
 .../namenode/sps/IntraSPSNameNodeContext.java   |   4 -
 .../sps/IntraSPSNameNodeFileIdCollector.java|  12 +-
 .../hdfs/server/namenode/sps/SPSPathIds.java|   1 +
 .../namenode/sps/StoragePolicySatisfier.java|  83 +++-
 .../sps/ExternalSPSBlockMoveTaskHandler.java|   2 +-
 .../hdfs/server/sps/ExternalSPSContext.java |  57 +---
 .../server/sps/ExternalSPSFileIDCollector.java  |  12 +-
 .../sps/ExternalStoragePolicySatisfier.java | 130 +++
 .../src/site/markdown/ArchivalStorage.md|  10 +-
 .../sps/TestStoragePolicySatisfier.java |  22 ++--
 .../sps/TestExternalStoragePolicySatisfier.java |  33 +++--
 15 files changed, 259 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8881895/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index bc6e7a4..94426a5 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -63,6 +63,7 @@ function hadoop_usage
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary 
namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a 
directory or diff the current directory contents with a snapshot"
   hadoop_add_subcommand "storagepolicies" admin 
"list/get/set/satisfyStoragePolicy block storage policies"
+  hadoop_add_subcommand "sps" daemon "run external storagepolicysatisfier"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
@@ -201,6 +202,10 @@ function hdfscmd_case
 storagepolicies)
   HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
 ;;
+sps)
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+  
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.sps.ExternalStoragePolicySatisfier
+;;
 version)
   HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
 ;;
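
With the subcommand registered and HADOOP_SUBCMD_SUPPORTDAEMONIZATION set,
the external satisfier should be manageable like any other HDFS daemon;
illustratively (standard launcher semantics assumed, not quoted from the
patch):

  hdfs --daemon start sps    # launch ExternalStoragePolicySatisfier
  hdfs --daemon stop sps     # stop it again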

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8881895/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index daaa7a3..f348a33 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -94,6 +94,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import 
org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeBlockMoveTaskHandler;
+import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
+import 
org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeFileIdCollector;
 import org.apache.hadoop.hdfs.server.namenode.sps.SPSPathIds;
 import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
@@ -5098,9 +5101,15 @@ public class BlockManager implements BlockStatsMXBean {
   return;
 }
 updateSPSMode(StoragePolicySatisfierMode.INTERNAL);
+sps.init(new IntraSPSNameNodeContext(this.namesystem, this, sps),
+new IntraSPSNameNodeFileIdCollector(this.namesystem.getFSDirectory(),
+sps),
+new 

[37/50] [abbrv] hadoop git commit: HDFS-13057: [SPS]: Revisit configurations to make SPS service modes internal/external/none. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fb1858/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
index 0e3a5a3..2257608 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
@@ -103,8 +104,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 }
 
 final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
@@ -216,8 +217,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 }
 
 final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
@@ -328,8 +329,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 conf.set(DFSConfigKeys
 .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
 "3000");
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
@@ -420,8 +421,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 }
 
 final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
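
The tests now select a satisfier mode instead of flipping a boolean; a
minimal configuration sketch (EXTERNAL and NONE are the other modes implied
by the commit title):

  Configuration conf = new HdfsConfiguration();
  // Tri-state replacement for the old dfs.storage.policy.satisfier.enabled:
  conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
      StoragePolicySatisfierMode.EXTERNAL.toString());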

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fb1858/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index 9a401bd..42b04da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.sps.BlockMovementListener;
@@ -54,12 +55,19 @@ public class TestExternalStoragePolicySatisfier
   new StorageType[][]{{StorageType.DISK, StorageType.DISK},
   {StorageType.DISK, StorageType.DISK},
   {StorageType.DISK, StorageType.DISK}};

[32/50] [abbrv] hadoop git commit: HDFS-12982 : [SPS]: Reduce the locking and cleanup the Namesystem access. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-12982 : [SPS]: Reduce the locking and cleanup the Namesystem access. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f4c8c15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f4c8c15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f4c8c15

Branch: refs/heads/HDFS-10285
Commit: 8f4c8c1564cec36791bb01d0735c06705ecf0de4
Parents: dba
Author: Surendra Singh Lilhore 
Authored: Mon Jan 8 15:13:11 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:46:40 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  16 +-
 .../blockmanagement/DatanodeDescriptor.java |   2 +-
 .../server/blockmanagement/DatanodeManager.java |  22 ++
 .../server/namenode/FSDirStatAndListingOp.java  |   1 +
 .../hdfs/server/namenode/FSNamesystem.java  |  44 ++-
 .../hdfs/server/namenode/IntraNNSPSContext.java |  41 --
 .../hadoop/hdfs/server/namenode/Namesystem.java |  24 ++
 .../sps/BlockStorageMovementAttemptedItems.java |  17 +-
 .../sps/BlockStorageMovementNeeded.java |  48 ++-
 .../hdfs/server/namenode/sps/Context.java   | 181 +
 .../namenode/sps/IntraSPSNameNodeContext.java   | 220 +++
 .../namenode/sps/StoragePolicySatisfier.java| 374 +--
 .../TestBlockStorageMovementAttemptedItems.java |  17 +-
 .../sps/TestStoragePolicySatisfier.java |  25 +-
 14 files changed, 742 insertions(+), 290 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f4c8c15/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1cf687e..c2d5162 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,11 +89,12 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
-import org.apache.hadoop.hdfs.server.namenode.IntraNNSPSContext;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.sps.Context;
+import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -433,6 +434,7 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
+  private Context spsctxt = null;
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -479,8 +481,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-StoragePolicySatisfier.Context spsctxt = new IntraNNSPSContext(namesystem);
-sps = new StoragePolicySatisfier(namesystem, this, conf, spsctxt);
+spsctxt = new IntraSPSNameNodeContext(namesystem, this, conf);
+sps = new StoragePolicySatisfier(spsctxt);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5031,8 +5033,8 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-
-sps.start(false);
+// TODO: FSDirectory will get removed via HDFS-12911 modularization work
+sps.start(false, namesystem.getFSDirectory());
   }
 
   /**
@@ -5068,8 +5070,8 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-
-sps.start(true);
+// TODO: FSDirectory will get removed via HDFS-12911 modularization work
+sps.start(true, 

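The BlockManager hunks above swap the old IntraNNSPSContext for the new sps.Context abstraction and route the FSDirectory into sps.start(). As a hedged illustration of the decoupling this buys (the interface and method names below are hypothetical distillations, not the branch's actual Context API):

    // SPS consults a context object instead of reaching into FSNamesystem
    // directly, so an out-of-NameNode implementation can be plugged in later.
    interface SpsContext {
      boolean isRunning();               // may the satisfier do work right now?
      boolean isFileExist(long inodeId); // is the tracked file still present?
    }
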
[46/50] [abbrv] hadoop git commit: HDFS-13166: [SPS]: Implement caching mechanism to keep LIVE datanodes to minimize costly getLiveDatanodeStorageReport() calls. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-13166: [SPS]: Implement caching mechanism to keep LIVE datanodes to 
minimize costly getLiveDatanodeStorageReport() calls. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60861a99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60861a99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60861a99

Branch: refs/heads/HDFS-10285
Commit: 60861a99bb3dbe4acbbcefd056ec2cd49977e659
Parents: 8947d98
Author: Surendra Singh Lilhore 
Authored: Thu Mar 1 00:08:37 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 09:01:14 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   5 +
 .../NamenodeProtocolServerSideTranslatorPB.java |  19 --
 .../NamenodeProtocolTranslatorPB.java   |  17 -
 .../hdfs/server/namenode/NameNodeRpcServer.java |  13 -
 .../hdfs/server/namenode/sps/Context.java   |  24 +-
 .../namenode/sps/DatanodeCacheManager.java  | 121 +++
 .../namenode/sps/IntraSPSNameNodeContext.java   |  23 +-
 .../namenode/sps/StoragePolicySatisfier.java| 340 ++-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  16 -
 .../hdfs/server/sps/ExternalSPSContext.java |  32 +-
 .../src/main/proto/NamenodeProtocol.proto   |  25 --
 .../src/main/resources/hdfs-default.xml |  11 +
 .../src/site/markdown/ArchivalStorage.md|   2 +-
 .../TestStoragePolicySatisfyWorker.java |   3 +
 .../TestPersistentStoragePolicySatisfier.java   |   6 +
 .../TestStoragePolicySatisfierWithHA.java   |   3 +
 .../sps/TestStoragePolicySatisfier.java |   4 +
 ...stStoragePolicySatisfierWithStripedFile.java |  24 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |   3 +
 19 files changed, 431 insertions(+), 260 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60861a99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c79bf60..843d9d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -640,6 +640,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_SPS_MAX_OUTSTANDING_PATHS_KEY =
   "dfs.storage.policy.satisfier.max.outstanding.paths";
   public static final int DFS_SPS_MAX_OUTSTANDING_PATHS_DEFAULT = 10000;
+  // SPS datanode cache config, defaulting to 5mins.
+  public static final String DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS =
+  "dfs.storage.policy.satisfier.datanode.cache.refresh.interval.ms";
+  public static final long DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS_DEFAULT =
+      300000L;
 
   // SPS keytab configurations, by default it is disabled.
   public static final String  DFS_SPS_ADDRESS_KEY =

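The new key above controls how often the datanode cache is refreshed. A hedged sketch of how a caller might honor the interval (only the key names come from this hunk; the cache fields and the fetch helper are illustrative):

    long refreshMs = conf.getLong(
        DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS,
        DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
    if (Time.monotonicNow() - lastRefreshTimeMs >= refreshMs) {
      // hypothetical helper standing in for the costly
      // getLiveDatanodeStorageReport() RPC this cache is meant to avoid
      liveDnReports = fetchLiveDatanodeStorageReport();
      lastRefreshTimeMs = Time.monotonicNow();
    }
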
http://git-wip-us.apache.org/repos/asf/hadoop/blob/60861a99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index ed176cc..e4283c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -23,8 +23,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
@@ -277,21 +275,4 @@ public class NamenodeProtocolServerSideTranslatorPB 

[31/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by 
Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dbac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dbac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dbac

Branch: refs/heads/HDFS-10285
Commit: dbac4a8ad82f9290c8430b95e34026942b53
Parents: 82e10f5
Author: Uma Maheswara Rao G 
Authored: Fri Dec 22 09:10:12 2017 -0800
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:46:33 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|6 +-
 .../BlockStorageMovementAttemptedItems.java |  241 ---
 .../namenode/BlockStorageMovementNeeded.java|  574 --
 .../hdfs/server/namenode/FSNamesystem.java  |1 +
 .../hdfs/server/namenode/IntraNNSPSContext.java |   41 +
 .../server/namenode/StoragePolicySatisfier.java |  973 --
 .../sps/BlockStorageMovementAttemptedItems.java |  241 +++
 .../sps/BlockStorageMovementNeeded.java |  572 ++
 .../namenode/sps/StoragePolicySatisfier.java|  988 ++
 .../hdfs/server/namenode/sps/package-info.java  |   28 +
 .../TestBlockStorageMovementAttemptedItems.java |  196 --
 .../namenode/TestStoragePolicySatisfier.java| 1775 -
 ...stStoragePolicySatisfierWithStripedFile.java |  580 --
 .../TestBlockStorageMovementAttemptedItems.java |  196 ++
 .../sps/TestStoragePolicySatisfier.java | 1779 ++
 ...stStoragePolicySatisfierWithStripedFile.java |  580 ++
 16 files changed, 4430 insertions(+), 4341 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index c81ed6c..1cf687e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,11 +89,12 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.IntraNNSPSContext;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -478,7 +479,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-sps = new StoragePolicySatisfier(namesystem, this, conf);
+StoragePolicySatisfier.Context spsctxt = new IntraNNSPSContext(namesystem);
+sps = new StoragePolicySatisfier(namesystem, this, conf, spsctxt);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
deleted file mode 100644
index 643255f..000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work 

[22/50] [abbrv] hadoop git commit: HDFS-12556: [SPS] : Block movement analysis should be done in read lock.

2018-07-04 Thread rakeshr
HDFS-12556: [SPS] : Block movement analysis should be done in read lock.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce8e9010
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce8e9010
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce8e9010

Branch: refs/heads/HDFS-10285
Commit: ce8e901010eb7a24731d9f931c972ff0d69431e5
Parents: 9202cac
Author: Surendra Singh Lilhore 
Authored: Sat Oct 14 15:11:26 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:43:57 2018 +0530

--
 .../server/namenode/StoragePolicySatisfier.java | 27 +---
 .../TestPersistentStoragePolicySatisfier.java   |  2 +-
 2 files changed, 19 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce8e9010/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index a28a806..cbfba44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -242,12 +242,25 @@ public class StoragePolicySatisfier implements Runnable {
   ItemInfo itemInfo = storageMovementNeeded.get();
   if (itemInfo != null) {
 long trackId = itemInfo.getTrackId();
-BlockCollection blockCollection =
-namesystem.getBlockCollection(trackId);
-// Check blockCollectionId existence.
+BlockCollection blockCollection;
+BlocksMovingAnalysis status = null;
+try {
+  namesystem.readLock();
+  blockCollection = namesystem.getBlockCollection(trackId);
+  // Check blockCollectionId existence.
+  if (blockCollection == null) {
+// File doesn't exists (maybe got deleted), remove trackId from
+// the queue
+storageMovementNeeded.removeItemTrackInfo(itemInfo);
+  } else {
+status =
+analyseBlocksStorageMovementsAndAssignToDN(
+blockCollection);
+  }
+} finally {
+  namesystem.readUnlock();
+}
 if (blockCollection != null) {
-  BlocksMovingAnalysis status =
-  analyseBlocksStorageMovementsAndAssignToDN(blockCollection);
   switch (status.status) {
   // Just add to monitor, so it will be retried after timeout
   case ANALYSIS_SKIPPED_FOR_RETRY:
@@ -283,10 +296,6 @@ public class StoragePolicySatisfier implements Runnable {
 storageMovementNeeded.removeItemTrackInfo(itemInfo);
 break;
   }
-} else {
-  // File doesn't exists (maybe got deleted), remove trackId from
-  // the queue
-  storageMovementNeeded.removeItemTrackInfo(itemInfo);
 }
   }
 }

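The hunk above moves the block-collection lookup and the placement analysis under the namesystem read lock, and defers the switch on the analysis status until after the unlock. Distilled, the locking discipline is:

    namesystem.readLock();
    try {
      // resolve the block collection and analyse its storage placement
      // while the namespace cannot change underneath us
    } finally {
      namesystem.readUnlock();
    }
    // act on the analysis result (retry, remove from queue, ...) outside
    // the lock, keeping the hold time short

Keeping the dispatch outside the lock is the point of the change: only the namespace reads need protection, so the lock hold time stays minimal.
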
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce8e9010/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index 5bce296..7165d06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -72,7 +72,7 @@ public class TestPersistentStoragePolicySatisfier {
   {StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD}
   };
 
-  private final int timeout = 30;
+  private final int timeout = 9;
 
   /**
* Setup environment for every test case.





[03/50] [abbrv] hadoop git commit: HDFS-11572. [SPS]: SPS should clean Xattrs when no blocks required to satisfy for a file. Contributed by Uma Maheswara Rao G

2018-07-04 Thread rakeshr
HDFS-11572. [SPS]: SPS should clean Xattrs when no blocks required to satisfy 
for a file. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cda66ec3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cda66ec3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cda66ec3

Branch: refs/heads/HDFS-10285
Commit: cda66ec3d025c36b8ce160715cd8fd9a2e1fb287
Parents: 2d3e8ac
Author: Rakesh Radhakrishnan 
Authored: Thu Apr 20 23:14:36 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:34:51 2018 +0530

--
 .../BlockStorageMovementAttemptedItems.java |   2 +-
 .../server/namenode/StoragePolicySatisfier.java | 116 ++-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  35 ++
 .../TestPersistentStoragePolicySatisfier.java   |  52 +
 .../namenode/TestStoragePolicySatisfier.java|  76 
 5 files changed, 225 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cda66ec3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index f2406da..bf7859c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -333,7 +333,7 @@ public class BlockStorageMovementAttemptedItems {
   + "doesn't exists in storageMovementAttemptedItems list",
   storageMovementAttemptedResult.getTrackId());
   // Remove xattr for the track id.
-  this.sps.notifyBlkStorageMovementFinished(
+  this.sps.postBlkStorageMovementCleanup(
   storageMovementAttemptedResult.getTrackId());
 }
 break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cda66ec3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 8be0a2a..3b20314 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -79,6 +79,27 @@ public class StoragePolicySatisfier implements Runnable {
   private final BlockStorageMovementAttemptedItems storageMovementsMonitor;
   private volatile boolean isRunning = false;
 
+  /**
+   * Represents the collective analysis status for all blocks.
+   */
+  private enum BlocksMovingAnalysisStatus {
+// Represents that, the analysis skipped due to some conditions. A such
+// condition is if block collection is in incomplete state.
+ANALYSIS_SKIPPED_FOR_RETRY,
+// Represents that, all block storage movement needed blocks found its
+// targets.
+ALL_BLOCKS_TARGETS_PAIRED,
+// Represents that, only fewer or none of the block storage movement needed
+// block found its eligible targets.
+FEW_BLOCKS_TARGETS_PAIRED,
+// Represents that, none of the blocks found for block storage movements.
+BLOCKS_ALREADY_SATISFIED,
+// Represents that, the analysis skipped due to some conditions.
+// Example conditions are if no blocks really exists in block collection or
+// if analysis is not required on ec files with unsuitable storage policies
+BLOCKS_TARGET_PAIRING_SKIPPED;
+  }
+
   public StoragePolicySatisfier(final Namesystem namesystem,
   final BlockStorageMovementNeeded storageMovementNeeded,
   final BlockManager blkManager, Configuration conf) {
@@ -208,10 +229,31 @@ public class StoragePolicySatisfier implements Runnable {
 namesystem.getBlockCollection(blockCollectionID);
 // Check blockCollectionId existence.
 if (blockCollection != null) {
-  boolean allBlockLocsAttemptedToSatisfy =
-      computeAndAssignStorageMismatchedBlocksToDNs(blockCollection);
-  this.storageMovementsMonitor
- 

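The enum introduced above is what the satisfier's main loop dispatches on. A hedged sketch of that dispatch (the handler methods are hypothetical; the constants are from this hunk):

    switch (status) {
    case ANALYSIS_SKIPPED_FOR_RETRY:
    case FEW_BLOCKS_TARGETS_PAIRED:
      // hypothetical: keep the trackId queued so it is retried later
      retryAfterTimeout(trackId);
      break;
    case BLOCKS_ALREADY_SATISFIED:
    case BLOCKS_TARGET_PAIRING_SKIPPED:
      // hypothetical: nothing (more) to move, so clean up the satisfy xattr
      removeSatisfyXAttr(trackId);
      break;
    case ALL_BLOCKS_TARGETS_PAIRED:
    default:
      // movements were scheduled; the attempted-items monitor takes over
      break;
    }
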
[15/50] [abbrv] hadoop git commit: HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to improve thread cleanup time. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to 
improve thread cleanup time. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/771a1a45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/771a1a45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/771a1a45

Branch: refs/heads/HDFS-10285
Commit: 771a1a4560399d3a9227f5cdbd6cbabc85bd5082
Parents: 8d1e491
Author: Uma Maheswara Rao G 
Authored: Wed Jul 19 00:55:26 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:35:55 2018 +0530

--
 .../datanode/BlockStorageMovementTracker.java   | 16 
 .../server/datanode/StoragePolicySatisfyWorker.java |  5 +++--
 2 files changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/771a1a45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index c7e952b..f3d2bb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -77,7 +77,8 @@ public class BlockStorageMovementTracker implements Runnable {
 moverTaskFutures.wait(2000);
   }
 } catch (InterruptedException ignore) {
-  // ignore
+  // Sets interrupt flag of this thread.
+  Thread.currentThread().interrupt();
 }
   }
   try {
@@ -102,12 +103,19 @@ public class BlockStorageMovementTracker implements Runnable {
 synchronized (moverTaskFutures) {
   moverTaskFutures.remove(trackId);
 }
-// handle completed or inprogress blocks movements per trackId.
-blksMovementsStatusHandler.handle(resultPerTrackIdList);
+if (running) {
+  // handle completed or inprogress blocks movements per trackId.
+  blksMovementsStatusHandler.handle(resultPerTrackIdList);
+}
 movementResults.remove(trackId);
   }
 }
-  } catch (ExecutionException | InterruptedException e) {
+  } catch (InterruptedException e) {
+if (running) {
+  LOG.error("Exception while moving block replica to target storage"
+  + " type", e);
+}
+  } catch (ExecutionException e) {
 // TODO: Do we need failure retries and implement the same if required.
 LOG.error("Exception while moving block replica to target storage type",
 e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/771a1a45/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 196cd58..4e57805 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -137,8 +137,8 @@ public class StoragePolicySatisfyWorker {
* thread.
*/
   void stop() {
-movementTrackerThread.interrupt();
 movementTracker.stopTracking();
+movementTrackerThread.interrupt();
   }
 
   /**
@@ -147,7 +147,8 @@ public class StoragePolicySatisfyWorker {
   void waitToFinishWorkerThread() {
 try {
   movementTrackerThread.join(3000);
-} catch (InterruptedException ie) {
+} catch (InterruptedException ignore) {
+  // ignore
 }
   }
 

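Both fixes above follow the standard rule for InterruptedException: never swallow it silently; either restore the thread's interrupt flag or return promptly when the stop was deliberate. A generic sketch of the restore idiom:

    try {
      Thread.sleep(pollIntervalMs); // stands in for any blocking call
    } catch (InterruptedException e) {
      // restore the flag so callers further up can observe the interrupt
      Thread.currentThread().interrupt();
      return;
    }

Reordering stop() to call stopTracking() before interrupt() likewise ensures the tracker already sees running == false when the interrupt lands, so it exits quietly instead of logging a spurious error.
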




[18/50] [abbrv] hadoop git commit: HDFS-12291: [SPS]: Provide a mechanism to recursively iterate and satisfy storage policy of all the files under the given dir. Contributed by Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-12291: [SPS]: Provide a mechanism to recursively iterate and satisfy 
storage policy of all the files under the given dir. Contributed by Surendra 
Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c076d16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c076d16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c076d16

Branch: refs/heads/HDFS-10285
Commit: 7c076d16ae9af47071748aa7a2c7439c85e85644
Parents: bcc3236
Author: Uma Maheswara Rao G 
Authored: Sat Sep 30 06:31:52 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:43:46 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  22 +-
 .../BlockStorageMovementAttemptedItems.java |   8 +-
 .../namenode/BlockStorageMovementNeeded.java| 277 +++---
 .../server/namenode/ReencryptionHandler.java|   1 +
 .../server/namenode/StoragePolicySatisfier.java |  43 ++-
 .../src/main/resources/hdfs-default.xml |  23 ++
 .../src/site/markdown/ArchivalStorage.md|   3 +-
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../TestPersistentStoragePolicySatisfier.java   |   8 +-
 .../namenode/TestStoragePolicySatisfier.java| 377 ++-
 11 files changed, 689 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c076d16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index cf5206a..c8fa40c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -612,6 +612,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "dfs.storage.policy.satisfier.enabled";
   public static final boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT =
   false;
+  public static final String  DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY =
+  "dfs.storage.policy.satisfier.queue.limit";
+  public static final int  DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_DEFAULT =
+  1000;
+  public static final String DFS_SPS_WORK_MULTIPLIER_PER_ITERATION =
+  "dfs.storage.policy.satisfier.work.multiplier.per.iteration";
+  public static final int DFS_SPS_WORK_MULTIPLIER_PER_ITERATION_DEFAULT =
+  1;
   public static final String DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
       "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c076d16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index f5ceeaf..c26599c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1457,7 +1457,27 @@ public class DFSUtil {
 "It should be a positive, non-zero integer value.");
 return blocksReplWorkMultiplier;
   }
-  
+
+  /**
+   * Get DFS_SPS_WORK_MULTIPLIER_PER_ITERATION from
+   * configuration.
+   *
+   * @param conf Configuration
+   * @return Value of DFS_SPS_WORK_MULTIPLIER_PER_ITERATION
+   */
+  public static int getSPSWorkMultiplier(Configuration conf) {
+int spsWorkMultiplier = conf
+.getInt(
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION,
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
+Preconditions.checkArgument(
+(spsWorkMultiplier > 0),
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION +
+" = '" + spsWorkMultiplier + "' is invalid. " +
+"It should be a positive, non-zero integer value.");
+return spsWorkMultiplier;
+  }
+
   /**
* Get SPNEGO keytab Key from configuration
* 

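Callers are expected to read the multiplier through the new helper rather than the raw key; a usage sketch, assuming a populated Configuration (the per-iteration cap below is illustrative, as is liveDatanodeCount):

    // throws IllegalArgumentException (via Preconditions) for values <= 0
    int spsWorkMultiplier = DFSUtil.getSPSWorkMultiplier(conf);
    // hypothetical use: bound how many paths one scan iteration processes
    int maxPathsPerIteration = spsWorkMultiplier * liveDatanodeCount;
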
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c076d16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 

[25/50] [abbrv] hadoop git commit: HDFS-12106: [SPS]: Improve storage policy satisfier configurations. Contributed by Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-12106: [SPS]: Improve storage policy satisfier configurations. Contributed 
by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82e10f52
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82e10f52
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82e10f52

Branch: refs/heads/HDFS-10285
Commit: 82e10f522ad0715d1fd2dd0439171ed870874584
Parents: d1b810e
Author: Surendra Singh Lilhore 
Authored: Wed Nov 15 20:22:27 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:46:28 2018 +0530

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  6 +++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  4 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  3 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 10 ++--
 .../server/blockmanagement/DatanodeManager.java | 12 ++---
 .../datanode/StoragePolicySatisfyWorker.java|  3 +-
 .../BlockStorageMovementAttemptedItems.java |  8 +--
 .../namenode/BlockStorageMovementNeeded.java| 46 
 .../hdfs/server/namenode/FSNamesystem.java  |  3 ++
 .../server/namenode/StoragePolicySatisfier.java | 42 ---
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   | 27 +++---
 .../src/main/resources/hdfs-default.xml | 17 --
 .../src/site/markdown/ArchivalStorage.md|  2 +-
 .../TestBlockStorageMovementAttemptedItems.java | 10 ++--
 .../namenode/TestStoragePolicySatisfier.java| 57 ++--
 15 files changed, 199 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e10f52/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 190a1c6..aabcdd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -150,6 +150,12 @@ public final class HdfsConstants {
 SUCCESS,
 
 /**
+ * Few blocks failed to move and the path is still not
+ * fully satisfied the storage policy.
+ */
+FAILURE,
+
+/**
  * Status not available.
  */
 NOT_AVAILABLE

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e10f52/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 582693f..84812ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -3388,6 +3388,8 @@ public class PBHelperClient {
   return StoragePolicySatisfyPathStatus.IN_PROGRESS;
 case SUCCESS:
   return StoragePolicySatisfyPathStatus.SUCCESS;
+case FAILURE:
+  return StoragePolicySatisfyPathStatus.FAILURE;
 case NOT_AVAILABLE:
   return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
 default:
@@ -3404,6 +3406,8 @@ public class PBHelperClient {
   return HdfsConstants.StoragePolicySatisfyPathStatus.IN_PROGRESS;
 case SUCCESS:
   return HdfsConstants.StoragePolicySatisfyPathStatus.SUCCESS;
+case FAILURE:
+  return HdfsConstants.StoragePolicySatisfyPathStatus.FAILURE;
 case NOT_AVAILABLE:
   return HdfsConstants.StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
 default:

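The new FAILURE constant flows through these PB conversions to clients that poll the satisfier. A hedged client-side sketch (the checkStoragePolicySatisfyPathStatus call is assumed from the proto message this patch extends):

    StoragePolicySatisfyPathStatus status = dfs.getClient()
        .checkStoragePolicySatisfyPathStatus("/data/file1");
    if (status == StoragePolicySatisfyPathStatus.FAILURE) {
      // some block movements failed; the path does not yet fully satisfy
      // its storage policy, so the caller may choose to re-submit it
    }
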
http://git-wip-us.apache.org/repos/asf/hadoop/blob/82e10f52/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 2b666c3..fc09a7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -850,7 +850,8 @@ message CheckStoragePolicySatisfyPathStatusResponseProto {
 PENDING = 0;
 IN_PROGRESS = 1;
 SUCCESS = 2;
-NOT_AVAILABLE = 3;
+

[09/50] [abbrv] hadoop git commit: HDFS-11670: [SPS]: Add CLI command for satisfy storage policy operations. Contributed by Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-11670: [SPS]: Add CLI command for satisfy storage policy operations. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0542c87a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0542c87a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0542c87a

Branch: refs/heads/HDFS-10285
Commit: 0542c87a91b7089f25c6cd1110d6d2b70c18bcb3
Parents: b1d8dcc
Author: Uma Maheswara Rao G 
Authored: Mon Jun 19 17:16:49 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:35:24 2018 +0530

--
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   | 93 +++-
 .../src/site/markdown/ArchivalStorage.md| 21 +
 .../src/site/markdown/HDFSCommands.md   |  2 +
 .../hdfs/tools/TestStoragePolicyCommands.java   | 43 -
 4 files changed, 157 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0542c87a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index aeb10d9..662957c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -32,6 +33,8 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
 import java.io.FileNotFoundException;
+import com.google.common.base.Joiner;
+
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -245,6 +248,92 @@ public class StoragePolicyAdmin extends Configured implements Tool {
 }
   }
 
+  /** Command to schedule blocks to move based on specified policy. */
+  private static class SatisfyStoragePolicyCommand implements
+  AdminHelper.Command {
+@Override
+public String getName() {
+  return "-satisfyStoragePolicy";
+}
+
+@Override
+public String getShortUsage() {
+  return "[" + getName() + " -path <path>]\n";
+}
+
+@Override
+public String getLongUsage() {
+  TableListing listing = AdminHelper.getOptionDescriptionListing();
+  listing.addRow("<path>", "The path of the file/directory to satisfy"
+  + " storage policy");
+  return getShortUsage() + "\n" +
+  "Schedule blocks to move based on file/directory policy.\n\n" +
+  listing.toString();
+}
+
+@Override
+public int run(Configuration conf, List<String> args) throws IOException {
+  final String path = StringUtils.popOptionWithArgument("-path", args);
+  if (path == null) {
+System.err.println("Please specify the path for setting the storage " +
+"policy.\nUsage: " + getLongUsage());
+return 1;
+  }
+
+  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+  try {
+dfs.satisfyStoragePolicy(new Path(path));
+System.out.println("Scheduled blocks to move based on the current"
++ " storage policy on " + path);
+  } catch (Exception e) {
+System.err.println(AdminHelper.prettifyException(e));
+return 2;
+  }
+  return 0;
+}
+  }
+
+  /** Command to check storage policy satisfier status. */
+  private static class IsSPSRunningCommand implements AdminHelper.Command {
+@Override
+public String getName() {
+  return "-isSPSRunning";
+}
+
+@Override
+public String getShortUsage() {
+  return "[" + getName() + "]\n";
+}
+
+@Override
+public String getLongUsage() {
+  return getShortUsage() + "\n" +
+  "Check the status of Storage Policy Statisfier.\n\n";
+}
+
+@Override
+public int run(Configuration conf, List<String> args) throws IOException {
+  if (!args.isEmpty()) {
+System.err.print("Can't understand arguments: "
++ Joiner.on(" ").join(args) + "\n");
+System.err.println("Usage is " + getLongUsage());
+return 1;
+  }
+  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+  try {
+if(dfs.getClient().isStoragePolicySatisfierRunning()){
+  

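Both commands can also be driven programmatically through ToolRunner; a sketch using only the command names defined above (the StoragePolicyAdmin(Configuration) constructor is assumed):

    int rc = ToolRunner.run(new StoragePolicyAdmin(new HdfsConfiguration()),
        new String[] {"-satisfyStoragePolicy", "-path", "/data"});
    int rc2 = ToolRunner.run(new StoragePolicyAdmin(new HdfsConfiguration()),
        new String[] {"-isSPSRunning"}); // prints whether SPS is running
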
[04/50] [abbrv] hadoop git commit: HDFS-11695: [SPS]: Namenode failed to start while loading SPS xAttrs from the edits log. Contributed by Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-11695: [SPS]: Namenode failed to start while loading SPS xAttrs from the 
edits log. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bfe547e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bfe547e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bfe547e

Branch: refs/heads/HDFS-10285
Commit: 8bfe547e572d3aa6c77d2abf1861d6a8cfd08da5
Parents: cda66ec
Author: Uma Maheswara Rao G 
Authored: Mon May 22 21:39:43 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:34:57 2018 +0530

--
 .../hdfs/server/namenode/FSDirAttrOp.java   |  91 
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 145 +++
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   2 +-
 .../hdfs/server/namenode/FSDirectory.java   |  16 --
 .../hdfs/server/namenode/FSNamesystem.java  |  24 ++-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  10 ++
 .../server/namenode/StoragePolicySatisfier.java |   4 +-
 .../TestPersistentStoragePolicySatisfier.java   |  90 +++-
 .../namenode/TestStoragePolicySatisfier.java|   5 +-
 9 files changed, 268 insertions(+), 119 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bfe547e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 0df58bf..1dbee96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -43,14 +42,12 @@ import com.google.common.collect.Lists;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 
 public class FSDirAttrOp {
   static FileStatus setPermission(
@@ -193,29 +190,6 @@ public class FSDirAttrOp {
 return fsd.getAuditFileInfo(iip);
   }
 
-  static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
-  String src, boolean logRetryCache) throws IOException {
-
-FSPermissionChecker pc = fsd.getPermissionChecker();
-List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
-INodesInPath iip;
-fsd.writeLock();
-try {
-
-  // check operation permission.
-  iip = fsd.resolvePath(pc, src, DirOp.WRITE);
-  if (fsd.isPermissionEnabled()) {
-fsd.checkPathAccess(pc, iip, FsAction.WRITE);
-  }
-  XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-  xAttrs.add(satisfyXAttr);
-} finally {
-  fsd.writeUnlock();
-}
-fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
-return fsd.getAuditFileInfo(iip);
-  }
-
   static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)
   throws IOException {
 return bm.getStoragePolicies();
@@ -477,71 +451,6 @@ public class FSDirAttrOp {
 }
   }
 
-  static XAttr unprotectedSatisfyStoragePolicy(INodesInPath iip,
-  BlockManager bm, FSDirectory fsd) throws IOException {
-
-final INode inode = FSDirectory.resolveLastINode(iip);
-final int snapshotId = iip.getLatestSnapshotId();
-final List<INode> candidateNodes = new ArrayList<>();
-
-// TODO: think about optimization here, label the dir instead
-// of the sub-files of the dir.
-if (inode.isFile()) {
-  candidateNodes.add(inode);
-} else if (inode.isDirectory()) {
-  for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-if (node.isFile()) {
-  candidateNodes.add(node);
-}
-  }
-}
-
-// If node has satisfy xattr, then stop adding it
-// to satisfy movement queue.
-if (inodeHasSatisfyXAttr(candidateNodes)) {
-  throw new 

[12/50] [abbrv] hadoop git commit: HDFS-11874. [SPS]: Document the SPS feature. Contributed by Uma Maheswara Rao G

2018-07-04 Thread rakeshr
HDFS-11874. [SPS]: Document the SPS feature. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc6d57fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc6d57fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc6d57fe

Branch: refs/heads/HDFS-10285
Commit: fc6d57fec4a6438ed1b6a39a7e3bc037e8d51e71
Parents: 6b78e3f
Author: Rakesh Radhakrishnan 
Authored: Fri Jul 14 22:36:09 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:35:39 2018 +0530

--
 .../src/site/markdown/ArchivalStorage.md| 51 ++--
 1 file changed, 48 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6d57fe/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index a56cf8b..9098616 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -97,8 +97,44 @@ The effective storage policy can be retrieved by the "[`storagepolicies -getStor
 
 The default storage type of a datanode storage location will be DISK if it 
does not have a storage type tagged explicitly.
 
-Mover - A New Data Migration Tool
--
+Storage Policy Based Data Movement
+--
+
+Setting a new storage policy on an already existing file/dir will change the policy in the Namespace, but it will not move the blocks physically across storage media.
+The following two options allow users to move the blocks based on the new policy. So, once a user changes/sets a new policy on a file/directory, the user should also perform one of the following options to achieve the desired data movement. Note that both options cannot be allowed to run simultaneously.
+
+### Storage Policy Satisfier (SPS)
+
+When a user changes the storage policy on a file/directory, the user can call the `HdfsAdmin` API `satisfyStoragePolicy()` to move the blocks as per the new policy.
+The SPS daemon thread runs along with the namenode and periodically scans for storage mismatches between the new policy and the physical block placement. It only tracks the files/directories for which the user invoked satisfyStoragePolicy. If SPS identifies some blocks that need to move for a file, it schedules block movement tasks to datanodes. A Coordinator DataNode (C-DN) tracks all block movements associated with a file and notifies the namenode about movement success/failure. If any movement fails, the SPS re-attempts it by sending a new block movement task.
+
+SPS can be activated and deactivated dynamically without restarting the 
Namenode.
+
+Detailed design documentation can be found at [Storage Policy Satisfier(SPS) 
(HDFS-10285)](https://issues.apache.org/jira/browse/HDFS-10285)
+
+* **Note**: When a user invokes the `satisfyStoragePolicy()` API on a directory, SPS will consider only the files immediately under that directory. Sub-directories won't be considered for satisfying the policy. It is the user's responsibility to call this API on directories recursively, to track all files under the sub tree.
+
+* HdfsAdmin API :
+`public void satisfyStoragePolicy(final Path path) throws IOException`
+
+* Arguments :
+
+| | |
+|: |: |
+| `path` | A path which requires blocks storage movement. |
+
+Configurations:
+
+*   **dfs.storage.policy.satisfier.activate** - Used to activate or deactivate
+   SPS. Configuring true means SPS is activated, and false means it is deactivated.
+
+*   **dfs.storage.policy.satisfier.recheck.timeout.millis** - A timeout to 
re-check the processed block storage movement
+   command results from Co-ordinator Datanode.
+
+*   **dfs.storage.policy.satisfier.self.retry.timeout.millis** - A timeout to 
retry if no block movement results reported from
+   Co-ordinator Datanode in this configured timeout.
+
+### Mover - A New Data Migration Tool
 
 A new data migration tool is added for archiving data. The tool is similar to 
Balancer. It periodically scans the files in HDFS to check if the block 
placement satisfies the storage policy. For the blocks violating the storage 
policy, it moves the replicas to a different storage type in order to fulfill 
the storage policy requirement. Note that it always tries to move block 
replicas within the same node whenever possible. If that is not possible (e.g. 
when a node doesn’t have the target storage type) then it will copy the block 
replicas to another node over the network.
 
@@ -115,6 +151,10 @@ A new data migration tool 

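A minimal client-side sketch of the HdfsAdmin API documented above, assuming a cluster whose default filesystem is HDFS:

    Configuration conf = new HdfsConfiguration();
    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    // schedules movement for the files directly under the directory; as the
    // note above says, sub-directories must be submitted separately
    admin.satisfyStoragePolicy(new Path("/archive/2018"));
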
[19/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9202cac4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 57e9f94..70219f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -203,11 +203,11 @@ public class TestStoragePolicySatisfier {
   }
 
   /**
-   * Tests to verify that the block storage movement results will be propagated
+   * Tests to verify that the block storage movement report will be propagated
* to Namenode via datanode heartbeat.
*/
   @Test(timeout = 30)
-  public void testPerTrackIdBlocksStorageMovementResults() throws Exception {
+  public void testBlksStorageMovementAttemptFinishedReport() throws Exception {
 try {
   createCluster();
   // Change policy to ONE_SSD
@@ -229,7 +229,7 @@ public class TestStoragePolicySatisfier {
   DFSTestUtil.waitExpectedStorageType(
   file, StorageType.DISK, 2, 3, dfs);
 
-  waitForBlocksMovementResult(1, 3);
+  waitForBlocksMovementAttemptReport(1, 3);
 } finally {
   shutdownCluster();
 }
@@ -276,7 +276,7 @@ public class TestStoragePolicySatisfier {
 fileName, StorageType.DISK, 2, 3, dfs);
   }
 
-  waitForBlocksMovementResult(files.size(), 3);
+  waitForBlocksMovementAttemptReport(files.size(), 3);
 } finally {
   shutdownCluster();
 }
@@ -457,7 +457,7 @@ public class TestStoragePolicySatisfier {
   DFSTestUtil.waitExpectedStorageType(
   file, StorageType.DISK, 2, 3, dfs);
 
-  waitForBlocksMovementResult(1, 3);
+  waitForBlocksMovementAttemptReport(1, 3);
 } finally {
   shutdownCluster();
 }
@@ -630,7 +630,7 @@ public class TestStoragePolicySatisfier {
   // No block movement will be scheduled as there is no target node
   // available with the required storage type.
   waitForAttemptedItems(1, 3);
-  waitForBlocksMovementResult(1, 3);
+  waitForBlocksMovementAttemptReport(1, 3);
   DFSTestUtil.waitExpectedStorageType(
   file1, StorageType.ARCHIVE, 1, 3, dfs);
   DFSTestUtil.waitExpectedStorageType(
@@ -691,7 +691,7 @@ public class TestStoragePolicySatisfier {
   DFSTestUtil.waitExpectedStorageType(
   file, StorageType.DISK, 3, 3, dfs);
 
-  waitForBlocksMovementResult(1, 3);
+  waitForBlocksMovementAttemptReport(1, 3);
 } finally {
   shutdownCluster();
 }
@@ -871,7 +871,7 @@ public class TestStoragePolicySatisfier {
   Set<DatanodeDescriptor> dns = hdfsCluster.getNamesystem()
   .getBlockManager().getDatanodeManager().getDatanodes();
   for (DatanodeDescriptor dd : dns) {
-assertNull(dd.getBlocksToMoveStorages());
+assertNull(dd.getBlocksToMoveStorages(1));
   }
 
   // Enable heart beats now
@@ -1224,7 +1224,7 @@ public class TestStoragePolicySatisfier {
   /**
* Test SPS for batch processing.
*/
-  @Test(timeout = 30)
+  @Test(timeout = 300)
   public void testBatchProcessingForSPSDirectory() throws Exception {
 try {
   StorageType[][] diskTypes = new StorageType[][] {
@@ -1252,7 +1252,7 @@ public class TestStoragePolicySatisfier {
 DFSTestUtil.waitExpectedStorageType(fileName, StorageType.ARCHIVE, 2,
 3, dfs);
   }
-  waitForBlocksMovementResult(files.size(), 3);
+  waitForBlocksMovementAttemptReport(files.size(), 3);
   String expectedLogMessage = "StorageMovementNeeded queue remaining"
   + " capacity is zero";
   assertTrue("Log output does not contain expected log message: "
@@ -1268,7 +1268,7 @@ public class TestStoragePolicySatisfier {
*  1. Delete /root when traversing Q
*  2. U, R, S should not be in queued.
*/
-  @Test
+  @Test(timeout = 30)
   public void testTraverseWhenParentDeleted() throws Exception {
 StorageType[][] diskTypes = new StorageType[][] {
 {StorageType.DISK, StorageType.ARCHIVE},
@@ -1330,7 +1330,7 @@ public class TestStoragePolicySatisfier {
*  1. Delete L when traversing Q
*  2. E, M, U, R, S should not be in queued.
*/
-  @Test
+  @Test(timeout = 30)
   public void testTraverseWhenRootParentDeleted() throws Exception {
 StorageType[][] diskTypes = new StorageType[][] {
 {StorageType.DISK, StorageType.ARCHIVE},
@@ -1387,6 +1387,82 @@ public class TestStoragePolicySatisfier {
 dfs.delete(new Path("/root"), 

[13/50] [abbrv] hadoop git commit: HDFS-12146. [SPS]: Fix TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks. Contributed by Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-12146. [SPS]: Fix 
TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks.
 Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ecae2af7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ecae2af7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ecae2af7

Branch: refs/heads/HDFS-10285
Commit: ecae2af71511a187005a589a39db818610dc950c
Parents: fc6d57f
Author: Rakesh Radhakrishnan 
Authored: Mon Jul 17 22:40:03 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:35:44 2018 +0530

--
 .../server/namenode/TestStoragePolicySatisfier.java |  9 +
 .../TestStoragePolicySatisfierWithStripedFile.java  | 16 
 2 files changed, 13 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecae2af7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index be7236b..10ceae7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -1025,12 +1025,13 @@ public class TestStoragePolicySatisfier {
   list.add(cluster.stopDataNode(0));
   list.add(cluster.stopDataNode(0));
   cluster.restartNameNodes();
-  cluster.restartDataNode(list.get(0), true);
-  cluster.restartDataNode(list.get(1), true);
+  cluster.restartDataNode(list.get(0), false);
+  cluster.restartDataNode(list.get(1), false);
   cluster.waitActive();
   fs.satisfyStoragePolicy(filePath);
-  Thread.sleep(3000 * 6);
-  cluster.restartDataNode(list.get(2), true);
+  DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+  StorageType.ARCHIVE, 2, 3, cluster.getFileSystem());
+  cluster.restartDataNode(list.get(2), false);
   DFSTestUtil.waitExpectedStorageType(filePath.toString(),
   StorageType.ARCHIVE, 3, 3, cluster.getFileSystem());
 } finally {
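
The hunk above replaces the fixed Thread.sleep(3000 * 6) with an explicit wait
on the expected storage type, so the test blocks only as long as the block
movement actually takes. A minimal sketch of the same polling pattern, assuming
JUnit-style test code and Hadoop's GenericTestUtils; countArchiveReplicas() is
a hypothetical stand-in for the check that DFSTestUtil.waitExpectedStorageType
performs against the cluster:

import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

import org.apache.hadoop.test.GenericTestUtils;

class WaitForMovementSketch {
  // Hypothetical stand-in for the storage-type check done by
  // DFSTestUtil.waitExpectedStorageType.
  static int countArchiveReplicas() {
    return 0;
  }

  static void awaitMovement() throws TimeoutException, InterruptedException {
    Supplier<Boolean> moved = () -> countArchiveReplicas() >= 3;
    // Re-check every 500 ms; give up (and fail) after 30 s instead of
    // sleeping for a fixed interval.
    GenericTestUtils.waitFor(moved, 500, 30000);
  }
}

Bounded polling keeps the test fast on quick machines while still tolerating
slow block movement, which the sleep-based version could not do.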

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecae2af7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
index f905ead..c070113 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
@@ -308,8 +308,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
*/
   @Test(timeout = 30)
   public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
-// start 10 datanodes
-int numOfDatanodes = 10;
+// start 9 datanodes
+int numOfDatanodes = 9;
 int storagesPerDatanode = 2;
 long capacity = 20 * defaultStripeBlockSize;
 long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
@@ -338,7 +338,6 @@ public class TestStoragePolicySatisfierWithStripedFile {
 {StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE},
-{StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE}})
 .storageCapacities(capacities)
 .build();
@@ -366,15 +365,16 @@ public class TestStoragePolicySatisfierWithStripedFile {
   }
   cluster.restartNameNodes();
   // Restart half datanodes
-  for (int i = 0; i < numOfDatanodes / 2; i++) {
-cluster.restartDataNode(list.get(i), true);
+  for (int i = 0; i < 5; i++) {
+cluster.restartDataNode(list.get(i), false);
   }
   cluster.waitActive();
   fs.satisfyStoragePolicy(fooFile);
-  Thread.sleep(3000 * 6);
+  DFSTestUtil.waitExpectedStorageType(fooFile.toString(),
+  StorageType.ARCHIVE, 5, 3, cluster.getFileSystem());
   // Start remaining datanodes
-  

[05/50] [abbrv] hadoop git commit: HDFS-11883: [SPS]: Handle NPE in BlockStorageMovementTracker when dropSPSWork() is called. Contributed by Surendra Singh Lilhore.

2018-07-04 Thread rakeshr
HDFS-11883: [SPS]: Handle NPE in BlockStorageMovementTracker when 
dropSPSWork() is called. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08a199fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08a199fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08a199fe

Branch: refs/heads/HDFS-10285
Commit: 08a199fea1b081e0c959a395a2ae69c672bafcb5
Parents: 8bfe547
Author: Uma Maheswara Rao G 
Authored: Tue May 30 18:12:17 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:35:02 2018 +0530

--
 .../hdfs/server/datanode/BlockStorageMovementTracker.java  | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08a199fe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index 99858bc..c7e952b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -88,13 +88,17 @@ public class BlockStorageMovementTracker implements 
Runnable {
   long trackId = result.getTrackId();
   List<Future<BlockMovementResult>> blocksMoving = moverTaskFutures
   .get(trackId);
+  if (blocksMoving == null) {
+LOG.warn("Future task doesn't exist for trackId " + trackId);
+continue;
+  }
   blocksMoving.remove(future);
 
   List<BlockMovementResult> resultPerTrackIdList =
   addMovementResultToTrackIdList(result);
 
   // Completed all the scheduled blocks movement under this 'trackId'.
-  if (blocksMoving.isEmpty()) {
+  if (blocksMoving.isEmpty() || moverTaskFutures.get(trackId) == null) 
{
 synchronized (moverTaskFutures) {
   moverTaskFutures.remove(trackId);
 }
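
The added null check closes a race with dropSPSWork(), which can clear
moverTaskFutures while the tracker thread is still draining completed futures.
A minimal sketch of the same defensive pattern, assuming only a shared
ConcurrentHashMap that another thread may clear at any time; the class and
field names below are illustrative, not the actual tracker members:

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;

class MovementTrackerSketch {
  // Illustrative shared state; another thread may clear it at any time.
  private final Map<Long, List<Future<Integer>>> tasks =
      new ConcurrentHashMap<>();

  void onResult(long trackId, Future<Integer> done) {
    // Re-read and null-check: a concurrent dropSPSWork()-style call may
    // have removed the entry between scheduling and completion.
    List<Future<Integer>> moving = tasks.get(trackId);
    if (moving == null) {
      return; // work was dropped; nothing left to account for
    }
    moving.remove(done);
    if (moving.isEmpty()) {
      tasks.remove(trackId);
    }
  }
}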





[16/50] [abbrv] hadoop git commit: HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. Contributed by Rakesh R.

2018-07-04 Thread rakeshr
HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a59eb621
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a59eb621
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a59eb621

Branch: refs/heads/HDFS-10285
Commit: a59eb621c5e535aa7486847e5287baa172876422
Parents: 771a1a4
Author: Uma Maheswara Rao G 
Authored: Thu Aug 17 13:21:07 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 5 08:36:01 2018 +0530

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../server/blockmanagement/BlockManager.java| 104 +++
 .../BlockStorageMovementAttemptedItems.java |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  20 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  22 ++--
 .../server/namenode/StoragePolicySatisfier.java |  20 ++--
 .../protocol/BlocksStorageMovementResult.java   |   2 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  11 +-
 .../src/main/resources/hdfs-default.xml |  10 +-
 .../src/site/markdown/ArchivalStorage.md|  14 +--
 .../src/site/markdown/HDFSCommands.md   |   2 +-
 .../TestStoragePolicySatisfyWorker.java |   2 +-
 .../hadoop/hdfs/server/mover/TestMover.java |  22 ++--
 .../hdfs/server/mover/TestStorageMover.java |   2 +-
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../namenode/TestNameNodeReconfigure.java   |  99 --
 .../TestPersistentStoragePolicySatisfier.java   |   6 +-
 .../namenode/TestStoragePolicySatisfier.java|  35 +--
 .../TestStoragePolicySatisfierWithHA.java   |  10 +-
 ...stStoragePolicySatisfierWithStripedFile.java |   8 ++
 .../hdfs/tools/TestStoragePolicyCommands.java   |  21 ++--
 22 files changed, 265 insertions(+), 161 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a59eb621/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 38be348b..bc6e7a4 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -62,7 +62,7 @@ function hadoop_usage
   hadoop_add_subcommand "portmap" daemon "run a portmap service"
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary 
namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a 
directory or diff the current directory contents with a snapshot"
-  hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage 
policies"
+  hadoop_add_subcommand "storagepolicies" admin 
"list/get/set/satisfyStoragePolicy block storage policies"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a59eb621/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b5341a2..cf5206a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -608,10 +608,10 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final int    DFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; 
// One minute
 
   // SPS related configurations
-  public static final String  DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY =
-  "dfs.storage.policy.satisfier.activate";
-  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT =
-  true;
+  public static final String  DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY =
+  "dfs.storage.policy.satisfier.enabled";
+  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT =
+  false;
   public static final String 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =

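With the rename above, the satisfier is also off by default (the old activate
key defaulted to true). A minimal sketch of enabling it programmatically,
test-style, assuming only the Configuration API and the constant introduced in
this hunk:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

class EnableSpsSketch {
  static Configuration withSpsEnabled() {
    Configuration conf = new Configuration();
    // The default flipped to false in this patch, so the satisfier must
    // now be switched on explicitly through the renamed key.
    conf.setBoolean(
        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, true);
    return conf;
  }
}
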
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a59eb621/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

[1/2] hadoop git commit: HDDS-212. Introduce NodeStateManager to manage the state of Datanodes in SCM. Contributed by Nanda kumar.

2018-07-04 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3b637155a -> 71df8c27c


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
new file mode 100644
index 000..dd91866
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
@@ -0,0 +1,281 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
+
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Maintains the state of datanodes in SCM. This class should only be used by
+ * NodeStateManager to maintain the state. If anyone wants to change the
+ * state of a node, they should go through NodeStateManager; do not use
+ * this class directly.
+ */
+public class NodeStateMap {
+
+  /**
+   * Node id to node info map.
+   */
+  private final ConcurrentHashMap<UUID, DatanodeInfo> nodeMap;
+  /**
+   * Represents the current state of node.
+   */
+  private final ConcurrentHashMap<NodeState, Set<UUID>> stateMap;
+  private final ReadWriteLock lock;
+
+  /**
+   * Creates a new instance of NodeStateMap with no nodes.
+   */
+  public NodeStateMap() {
+lock = new ReentrantReadWriteLock();
+nodeMap = new ConcurrentHashMap<>();
+stateMap = new ConcurrentHashMap<>();
+initStateMap();
+  }
+
+  /**
+   * Initializes the state map with available states.
+   */
+  private void initStateMap() {
+for (NodeState state : NodeState.values()) {
+  stateMap.put(state, new HashSet<>());
+}
+  }
+
+  /**
+   * Adds a node to NodeStateMap.
+   *
+   * @param datanodeDetails DatanodeDetails
+   * @param nodeState initial NodeState
+   *
+   * @throws NodeAlreadyExistsException if the node already exists
+   */
+  public void addNode(DatanodeDetails datanodeDetails, NodeState nodeState)
+  throws NodeAlreadyExistsException {
+lock.writeLock().lock();
+try {
+  UUID id = datanodeDetails.getUuid();
+  if (nodeMap.containsKey(id)) {
+throw new NodeAlreadyExistsException("Node UUID: " + id);
+  }
+  nodeMap.put(id, new DatanodeInfo(datanodeDetails));
+  stateMap.get(nodeState).add(id);
+} finally {
+  lock.writeLock().unlock();
+}
+  }
+
+  /**
+   * Updates the node state.
+   *
+   * @param nodeId Node Id
+   * @param currentState current state
+   * @param newState new state
+   *
+   * @throws NodeNotFoundException if the node is not present
+   */
+  public void updateNodeState(UUID nodeId, NodeState currentState,
+  NodeState newState) throws NodeNotFoundException {
+lock.writeLock().lock();
+try {
+  if (stateMap.get(currentState).remove(nodeId)) {
+stateMap.get(newState).add(nodeId);
+  } else {
+throw new NodeNotFoundException("Node UUID: " + nodeId +
+", not found in state: " + currentState);
+  }
+} finally {
+  lock.writeLock().unlock();
+}
+  }
+
+  /**
+   * Returns DatanodeDetails for the given node id.
+   *
+   * @param uuid Node Id
+   *
+   * @return DatanodeDetails of the node
+   *
+   * @throws NodeNotFoundException if the node is not present
+   */
+  public DatanodeDetails getNodeDetails(UUID uuid)
+  throws NodeNotFoundException {
+return getNodeInfo(uuid);
+  }
+
+  /**
+   * Returns DatanodeInfo for the given node id.
+   *
+   * @param uuid Node Id
+   *
+   * @return DatanodeInfo of the node
+   *
+   * @throws NodeNotFoundException if the node is not 

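To make the locking contract of NodeStateMap concrete, a small usage sketch
under the stated constraint that real callers go through NodeStateManager. It
assumes the HEALTHY and STALE values of HddsProtos.NodeState; construction of
the DatanodeDetails argument is elided, and demo() only shows the call
sequence:

import java.util.UUID;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.apache.hadoop.hdds.scm.node.states.NodeStateMap;

class NodeStateMapSketch {
  static void demo(DatanodeDetails dn)
      throws NodeAlreadyExistsException, NodeNotFoundException {
    NodeStateMap map = new NodeStateMap();
    // Register the node under its initial state.
    map.addNode(dn, NodeState.HEALTHY);
    UUID id = dn.getUuid();
    // Atomically (under the write lock) move the id from the HEALTHY set
    // to the STALE set; throws NodeNotFoundException if it is not there.
    map.updateNodeState(id, NodeState.HEALTHY, NodeState.STALE);
  }
}
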
[2/2] hadoop git commit: HDDS-212. Introduce NodeStateManager to manage the state of Datanodes in SCM. Contributed by Nanda kumar.

2018-07-04 Thread nanda
HDDS-212. Introduce NodeStateManager to manage the state of Datanodes in SCM. 
Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71df8c27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71df8c27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71df8c27

Branch: refs/heads/trunk
Commit: 71df8c27c9a0e326232d3baf16414a63b5ea5a4b
Parents: 3b63715
Author: Nanda kumar 
Authored: Thu Jul 5 02:11:10 2018 +0530
Committer: Nanda kumar 
Committed: Thu Jul 5 02:11:10 2018 +0530

--
 .../scm/client/ContainerOperationClient.java|   8 +-
 .../hadoop/hdds/protocol/DatanodeDetails.java   |  13 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   4 -
 .../hadoop/hdds/scm/client/ScmClient.java   |   5 +-
 .../StorageContainerLocationProtocol.java   |   5 +-
 ...rLocationProtocolClientSideTranslatorPB.java |   8 +-
 ...rLocationProtocolServerSideTranslatorPB.java |   8 +-
 .../StorageContainerLocationProtocol.proto  |  19 +-
 hadoop-hdds/common/src/main/proto/hdds.proto|  13 +-
 .../common/src/main/resources/ozone-default.xml |  11 -
 .../apache/hadoop/hdds/scm/HddsServerUtil.java  |  11 -
 .../protocol/StorageContainerNodeProtocol.java  |   4 +-
 .../hadoop/hdds/scm/node/DatanodeInfo.java  | 109 
 .../hdds/scm/node/HeartbeatQueueItem.java   |  98 
 .../hadoop/hdds/scm/node/NodeManager.java   |  16 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java  | 575 +++
 .../hadoop/hdds/scm/node/SCMNodeManager.java| 506 ++--
 .../node/states/NodeAlreadyExistsException.java |  45 ++
 .../hdds/scm/node/states/NodeException.java |  44 ++
 .../scm/node/states/NodeNotFoundException.java  |  49 ++
 .../hdds/scm/node/states/NodeStateMap.java  | 281 +
 .../scm/server/SCMClientProtocolServer.java |  60 +-
 .../server/SCMDatanodeHeartbeatDispatcher.java  |   2 +-
 .../scm/server/SCMDatanodeProtocolServer.java   |   2 +-
 .../hdds/scm/container/MockNodeManager.java |  58 +-
 .../hdds/scm/node/TestContainerPlacement.java   |  10 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 176 ++
 .../testutils/ReplicationNodeManagerMock.java   |  37 +-
 .../ozone/TestStorageContainerManager.java  |   4 +-
 .../hadoop/ozone/scm/node/TestQueryNode.java|  19 +-
 .../hadoop/ozone/ksm/KeySpaceManager.java   |   6 +-
 31 files changed, 1288 insertions(+), 918 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index b04f8c4..e7bdaf0 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -37,7 +37,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.EnumSet;
 import java.util.List;
 import java.util.UUID;
 
@@ -234,14 +233,14 @@ public class ContainerOperationClient implements 
ScmClient {
   /**
* Returns a set of Nodes that meet a query criteria.
*
-   * @param nodeStatuses - A set of criteria that we want the node to have.
+   * @param nodeStatuses - Criteria that we want the node to have.
* @param queryScope - Query scope - Cluster or pool.
* @param poolName - if it is pool, a pool name is required.
* @return A set of nodes that meet the requested criteria.
* @throws IOException
*/
   @Override
-  public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
+  public List<HddsProtos.Node> queryNode(HddsProtos.NodeState
   nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
   throws IOException {
 return storageContainerLocationClient.queryNode(nodeStatuses, queryScope,
@@ -458,7 +457,8 @@ public class ContainerOperationClient implements ScmClient {
*/
   @Override
   public long getContainerSize(long containerID) throws IOException {
-// TODO : Fix this, it currently returns the capacity but not the current 
usage.
+// TODO : Fix this, it currently returns the capacity
+// but not the current usage.
 long size = getContainerSizeB();
 if (size == -1) {
   throw new IOException("Container size unknown!");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
--
diff --git 

hadoop git commit: HDFS-13528. RBF: If a directory exceeds quota limit then quota usage is not refreshed for other mount entries. Contributed by Dibyendu Karmakar.

2018-07-04 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 844538d66 -> 48addcdbb


HDFS-13528. RBF: If a directory exceeds quota limit then quota usage is not 
refreshed for other mount entries. Contributed by Dibyendu Karmakar.

(cherry picked from commit 3b637155a47d2aa93284969a96208347a647083d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48addcdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48addcdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48addcdb

Branch: refs/heads/branch-3.1
Commit: 48addcdbbe6c858d03c072918bf49b4b89be8a05
Parents: 844538d
Author: Yiqun Lin 
Authored: Wed Jul 4 15:03:24 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Jul 4 15:30:17 2018 +0800

--
 .../hdfs/server/federation/router/Quota.java|   2 +-
 .../router/RouterQuotaUpdateService.java|  43 +++-
 .../federation/router/TestRouterQuota.java  | 212 ++-
 3 files changed, 243 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48addcdb/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
index dbb6ffa..413a4e1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
@@ -199,7 +199,7 @@ public class Quota {
 if (manager != null) {
   Set<String> childrenPaths = manager.getPaths(path);
   for (String childPath : childrenPaths) {
-locations.addAll(rpcServer.getLocationsForPath(childPath, true));
+locations.addAll(rpcServer.getLocationsForPath(childPath, true, 
false));
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48addcdb/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
index 9fc93c1..506e2ee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
@@ -27,6 +27,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
@@ -83,13 +84,40 @@ public class RouterQuotaUpdateService extends 
PeriodicService {
 RouterQuotaUsage oldQuota = entry.getQuota();
 long nsQuota = oldQuota.getQuota();
 long ssQuota = oldQuota.getSpaceQuota();
-// Call RouterRpcServer#getQuotaUsage for getting current quota usage.
-QuotaUsage currentQuotaUsage = this.rpcServer.getQuotaModule()
-.getQuotaUsage(src);
+
+QuotaUsage currentQuotaUsage = null;
+
+// Check whether the destination path exists in the filesystem. If the
+// destination is not present, reset the usage; otherwise fetch the
+// current quota usage for the mount entry.
+HdfsFileStatus ret = this.rpcServer.getFileInfo(src);
+if (ret == null) {
+  currentQuotaUsage = new RouterQuotaUsage.Builder()
+  .fileAndDirectoryCount(0)
+  .quota(nsQuota)
+  .spaceConsumed(0)
+  .spaceQuota(ssQuota).build();
+} else {
+  // Call RouterRpcServer#getQuotaUsage for getting current quota 
usage.
+  // If any exception occurs catch it and proceed with other entries.
+  try {
+currentQuotaUsage = this.rpcServer.getQuotaModule()
+.getQuotaUsage(src);
+  } catch (IOException ioe) {
+LOG.error("Unable to get quota usage for " + src, ioe);
+continue;
+   

hadoop git commit: HDFS-13528. RBF: If a directory exceeds quota limit then quota usage is not refreshed for other mount entries. Contributed by Dibyendu Karmakar.

2018-07-04 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3e177a1d6 -> 1a96293c6


HDFS-13528. RBF: If a directory exceeds quota limit then quota usage is not 
refreshed for other mount entries. Contributed by Dibyendu Karmakar.

(cherry picked from commit 3b637155a47d2aa93284969a96208347a647083d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a96293c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a96293c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a96293c

Branch: refs/heads/branch-2
Commit: 1a96293c63363016c2d8e106175fe7f2aac97424
Parents: 3e177a1
Author: Yiqun Lin 
Authored: Wed Jul 4 15:03:24 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Jul 4 15:07:47 2018 +0800

--
 .../hdfs/server/federation/router/Quota.java|   2 +-
 .../router/RouterQuotaUpdateService.java|  43 +++-
 .../federation/router/TestRouterQuota.java  | 212 ++-
 3 files changed, 243 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a96293c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
index dbb6ffa..413a4e1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
@@ -199,7 +199,7 @@ public class Quota {
 if (manager != null) {
   Set<String> childrenPaths = manager.getPaths(path);
   for (String childPath : childrenPaths) {
-locations.addAll(rpcServer.getLocationsForPath(childPath, true));
+locations.addAll(rpcServer.getLocationsForPath(childPath, true, 
false));
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a96293c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
index 9fc93c1..506e2ee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
@@ -27,6 +27,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
@@ -83,13 +84,40 @@ public class RouterQuotaUpdateService extends 
PeriodicService {
 RouterQuotaUsage oldQuota = entry.getQuota();
 long nsQuota = oldQuota.getQuota();
 long ssQuota = oldQuota.getSpaceQuota();
-// Call RouterRpcServer#getQuotaUsage for getting current quota usage.
-QuotaUsage currentQuotaUsage = this.rpcServer.getQuotaModule()
-.getQuotaUsage(src);
+
+QuotaUsage currentQuotaUsage = null;
+
+// Check whether the destination path exists in the filesystem. If the
+// destination is not present, reset the usage; otherwise fetch the
+// current quota usage for the mount entry.
+HdfsFileStatus ret = this.rpcServer.getFileInfo(src);
+if (ret == null) {
+  currentQuotaUsage = new RouterQuotaUsage.Builder()
+  .fileAndDirectoryCount(0)
+  .quota(nsQuota)
+  .spaceConsumed(0)
+  .spaceQuota(ssQuota).build();
+} else {
+  // Call RouterRpcServer#getQuotaUsage for getting current quota 
usage.
+  // If any exception occurs catch it and proceed with other entries.
+  try {
+currentQuotaUsage = this.rpcServer.getQuotaModule()
+.getQuotaUsage(src);
+  } catch (IOException ioe) {
+LOG.error("Unable to get quota usage for " + src, ioe);
+continue;
+   

hadoop git commit: HDFS-13528. RBF: If a directory exceeds quota limit then quota usage is not refreshed for other mount entries. Contributed by Dibyendu Karmakar.

2018-07-04 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7ca4f0cef -> 3b637155a


HDFS-13528. RBF: If a directory exceeds quota limit then quota usage is not 
refreshed for other mount entries. Contributed by Dibyendu Karmakar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b637155
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b637155
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b637155

Branch: refs/heads/trunk
Commit: 3b637155a47d2aa93284969a96208347a647083d
Parents: 7ca4f0c
Author: Yiqun Lin 
Authored: Wed Jul 4 15:03:24 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Jul 4 15:03:24 2018 +0800

--
 .../hdfs/server/federation/router/Quota.java|   2 +-
 .../router/RouterQuotaUpdateService.java|  43 +++-
 .../federation/router/TestRouterQuota.java  | 212 ++-
 3 files changed, 243 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b637155/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
index dbb6ffa..413a4e1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
@@ -199,7 +199,7 @@ public class Quota {
 if (manager != null) {
   Set<String> childrenPaths = manager.getPaths(path);
   for (String childPath : childrenPaths) {
-locations.addAll(rpcServer.getLocationsForPath(childPath, true));
+locations.addAll(rpcServer.getLocationsForPath(childPath, true, 
false));
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b637155/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
index 9fc93c1..506e2ee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
@@ -27,6 +27,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
@@ -83,13 +84,40 @@ public class RouterQuotaUpdateService extends 
PeriodicService {
 RouterQuotaUsage oldQuota = entry.getQuota();
 long nsQuota = oldQuota.getQuota();
 long ssQuota = oldQuota.getSpaceQuota();
-// Call RouterRpcServer#getQuotaUsage for getting current quota usage.
-QuotaUsage currentQuotaUsage = this.rpcServer.getQuotaModule()
-.getQuotaUsage(src);
+
+QuotaUsage currentQuotaUsage = null;
+
+// Check whether the destination path exists in the filesystem. If the
+// destination is not present, reset the usage; otherwise fetch the
+// current quota usage for the mount entry.
+HdfsFileStatus ret = this.rpcServer.getFileInfo(src);
+if (ret == null) {
+  currentQuotaUsage = new RouterQuotaUsage.Builder()
+  .fileAndDirectoryCount(0)
+  .quota(nsQuota)
+  .spaceConsumed(0)
+  .spaceQuota(ssQuota).build();
+} else {
+  // Call RouterRpcServer#getQuotaUsage for getting current quota 
usage.
+  // If any exception occurs catch it and proceed with other entries.
+  try {
+currentQuotaUsage = this.rpcServer.getQuotaModule()
+.getQuotaUsage(src);
+  } catch (IOException ioe) {
+LOG.error("Unable to get quota usage for " + src, ioe);
+continue;
+  }
+}
+
 // If quota is not set in some subclusters under
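
The per-entry handling above (reset the usage when the destination is gone,
log and skip on error) is what keeps one over-quota mount from stalling the
refresh of the rest. A minimal standalone sketch of that control flow, using
hypothetical stand-ins for the router pieces rather than the real
RouterRpcServer API:

import java.io.IOException;
import java.util.List;

class QuotaRefreshSketch {
  // Hypothetical stand-ins for the RouterRpcServer calls used above.
  interface RouterRpc {
    Object getFileInfo(String src) throws IOException;
    long getQuotaUsage(String src) throws IOException;
  }

  static void refresh(RouterRpc rpc, List<String> mountPoints) {
    for (String src : mountPoints) {
      try {
        if (rpc.getFileInfo(src) == null) {
          // Destination is gone: reset the cached usage to zero and
          // move on rather than failing the whole cycle.
          continue;
        }
        long used = rpc.getQuotaUsage(src);
        // ... update the cached RouterQuotaUsage from `used` ...
      } catch (IOException ioe) {
        // One bad entry (e.g. already over quota) must not block the
        // refresh of the remaining mount entries.
      }
    }
  }
}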