Repository: hadoop Updated Branches: refs/heads/HDFS-10285 cee21db17 -> e5bd57145 (forced update)
HDFS-11726. [SPS]: StoragePolicySatisfier should not select same storage type as source and destination in same datanode. Surendra Singh Lilhore. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64575960 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64575960 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64575960 Branch: refs/heads/HDFS-10285 Commit: 64575960597c700fdc0d08ac2c74bb941f8601b5 Parents: 3de4131 Author: Rakesh Radhakrishnan <rake...@apache.org> Authored: Fri Jun 9 14:03:13 2017 +0530 Committer: Rakesh Radhakrishnan <rake...@apache.org> Committed: Tue Jul 31 12:08:57 2018 +0530 ---------------------------------------------------------------------- .../server/namenode/StoragePolicySatisfier.java | 23 ++++++---- .../namenode/TestStoragePolicySatisfier.java | 44 ++++++++++++++++++++ 2 files changed, 58 insertions(+), 9 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hadoop/blob/64575960/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java index 9e2a4a0..1b2afa3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java @@ -501,15 +501,20 @@ public class StoragePolicySatisfier implements Runnable { // avoid choosing a target which already has this block. 
for (int i = 0; i < sourceWithStorageList.size(); i++) { StorageTypeNodePair existingTypeNodePair = sourceWithStorageList.get(i); - StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(blockInfo, - existingTypeNodePair.dn, expected); - if (chosenTarget != null) { - sourceNodes.add(existingTypeNodePair.dn); - sourceStorageTypes.add(existingTypeNodePair.storageType); - targetNodes.add(chosenTarget.dn); - targetStorageTypes.add(chosenTarget.storageType); - expected.remove(chosenTarget.storageType); - // TODO: We can increment scheduled block count for this node? + + // Check whether the block replica is already placed in the expected + // storage type in this source datanode. + if (!expected.contains(existingTypeNodePair.storageType)) { + StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode( + blockInfo, existingTypeNodePair.dn, expected); + if (chosenTarget != null) { + sourceNodes.add(existingTypeNodePair.dn); + sourceStorageTypes.add(existingTypeNodePair.storageType); + targetNodes.add(chosenTarget.dn); + targetStorageTypes.add(chosenTarget.storageType); + expected.remove(chosenTarget.storageType); + // TODO: We can increment scheduled block count for this node? 
+ } } // To avoid choosing this excludeNodes as targets later excludeNodes.add(existingTypeNodePair.dn); http://git-wip-us.apache.org/repos/asf/hadoop/blob/64575960/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java index 8e08a1e..f1a4169 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java @@ -764,6 +764,50 @@ public class TestStoragePolicySatisfier { } /** + * If a replica with the expected storage type already exists in the source DN + * then that DN should be skipped. + */ + @Test(timeout = 300000) + public void testSPSWhenReplicaWithExpectedStorageAlreadyAvailableInSource() + throws Exception { + StorageType[][] diskTypes = new StorageType[][] { + {StorageType.DISK, StorageType.ARCHIVE}, + {StorageType.DISK, StorageType.ARCHIVE}, + {StorageType.DISK, StorageType.ARCHIVE}}; + + try { + hdfsCluster = startCluster(config, diskTypes, diskTypes.length, + storagesPerDatanode, capacity); + dfs = hdfsCluster.getFileSystem(); + // 1. Write two replicas on disk + DFSTestUtil.createFile(dfs, new Path(file), DEFAULT_BLOCK_SIZE, + (short) 2, 0); + // 2. Change policy to COLD, so third replica will be written to ARCHIVE. + dfs.setStoragePolicy(new Path(file), "COLD"); + + // 3. Change replication factor to 3. 
+ dfs.setReplication(new Path(file), (short) 3); + + DFSTestUtil + .waitExpectedStorageType(file, StorageType.DISK, 2, 30000, dfs); + DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000, + dfs); + + // 4. Change policy to HOT, so we can move all the blocks to DISK. + dfs.setStoragePolicy(new Path(file), "HOT"); + + // 5. Satisfy the policy. + dfs.satisfyStoragePolicy(new Path(file)); + + // 6. Blocks should move successfully. + DFSTestUtil + .waitExpectedStorageType(file, StorageType.DISK, 3, 30000, dfs); + } finally { + shutdownCluster(); + } + } + + /** * Tests that movements should not be assigned when there is no space in * target DN. */ --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org