http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
index 0e3a5a3..2257608 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
@@ -103,8 +104,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
     }
 
     final Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-        true);
+    conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+        StoragePolicySatisfierMode.INTERNAL.toString());
     initConfWithStripe(conf, defaultStripeBlockSize);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numOfDatanodes)
@@ -216,8 +217,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
     }
 
     final Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-        true);
+    conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+        StoragePolicySatisfierMode.INTERNAL.toString());
     initConfWithStripe(conf, defaultStripeBlockSize);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numOfDatanodes)
@@ -328,8 +329,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
     conf.set(DFSConfigKeys
         .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
         "3000");
-    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-        true);
+    conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+        StoragePolicySatisfierMode.INTERNAL.toString());
     initConfWithStripe(conf, defaultStripeBlockSize);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numOfDatanodes)
@@ -420,8 +421,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
     }
 
     final Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-        true);
+    conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+        StoragePolicySatisfierMode.INTERNAL.toString());
     initConfWithStripe(conf, defaultStripeBlockSize);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numOfDatanodes)
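The hunks above, and the analogous ones in the files below, all make the same substitution: the boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY setting is replaced by DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, whose value is one of the HdfsConstants.StoragePolicySatisfierMode constants. A minimal sketch of the new pattern, assuming only the constants imported above; the class and method names here are illustrative, not part of the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;

    public class SpsModeConfSketch {
      /** Builds a conf with the storage policy satisfier in the given mode. */
      public static Configuration withSpsMode(StoragePolicySatisfierMode mode) {
        Configuration conf = new HdfsConfiguration();
        // Old form, removed by this patch:
        //   conf.setBoolean(
        //       DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, true);
        // New form: INTERNAL (satisfier runs inside the NameNode, as in the
        // tests above), EXTERNAL (satisfier runs outside the NameNode, as in
        // TestExternalStoragePolicySatisfier below) or NONE (disabled),
        // stored as the enum's string value.
        conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
            mode.toString());
        return conf;
      }
    }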
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index 9a401bd..42b04da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.sps.BlockMovementListener;
@@ -54,12 +55,19 @@ public class TestExternalStoragePolicySatisfier
       new StorageType[][]{{StorageType.DISK, StorageType.DISK},
           {StorageType.DISK, StorageType.DISK},
           {StorageType.DISK, StorageType.DISK}};
+  private NameNodeConnector nnc;
+
+  @Override
+  public void setUp() {
+    super.setUp();
+
+    getConf().set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+        StoragePolicySatisfierMode.EXTERNAL.toString());
+  }
 
   @Override
   public void createCluster() throws IOException {
     getConf().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-    getConf().setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-        true);
     setCluster(startCluster(getConf(), allDiskTypes, NUM_OF_DATANODES,
         STORAGES_PER_DATANODE, CAPACITY));
     getFS();
@@ -80,35 +88,75 @@ public class TestExternalStoragePolicySatisfier
         .numDataNodes(numberOfDatanodes).storagesPerDatanode(storagesPerDn)
         .storageTypes(storageTypes).storageCapacities(capacities).build();
     cluster.waitActive();
-    if (conf.getBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-        false)) {
-      BlockManager blkMgr = cluster.getNameNode().getNamesystem()
-          .getBlockManager();
-      SPSService spsService = blkMgr.getSPSService();
-      spsService.stopGracefully();
-
-      IntraSPSNameNodeContext context = new IntraSPSNameNodeContext(
-          cluster.getNameNode().getNamesystem(),
-          blkMgr, blkMgr.getSPSService());
-      ExternalBlockMovementListener blkMoveListener =
-          new ExternalBlockMovementListener();
-      ExternalSPSBlockMoveTaskHandler externalHandler =
-          new ExternalSPSBlockMoveTaskHandler(conf, getNameNodeConnector(conf),
-              blkMgr.getSPSService());
-      externalHandler.init();
-      spsService.init(context,
-          new ExternalSPSFileIDCollector(context, blkMgr.getSPSService(), 5),
-          externalHandler,
-          blkMoveListener);
-      spsService.start(true);
-    }
+
+    nnc = getNameNodeConnector(getConf());
+
+    BlockManager blkMgr = cluster.getNameNode().getNamesystem()
+        .getBlockManager();
+    SPSService spsService = blkMgr.getSPSService();
+    spsService.stopGracefully();
+
+    // TODO: Since External is not fully implemented, just used INTERNAL now.
+    // Need to set External context here.
+    IntraSPSNameNodeContext context = new IntraSPSNameNodeContext(
+        cluster.getNameNode().getNamesystem(), blkMgr, blkMgr.getSPSService()) {
+      public boolean isRunning() {
+        return true;
+      };
+    };
+    ExternalBlockMovementListener blkMoveListener =
+        new ExternalBlockMovementListener();
+    ExternalSPSBlockMoveTaskHandler externalHandler =
+        new ExternalSPSBlockMoveTaskHandler(conf, nnc,
+            blkMgr.getSPSService());
+    externalHandler.init();
+    spsService.init(context,
+        new ExternalSPSFileIDCollector(context, blkMgr.getSPSService()),
+        externalHandler,
+        blkMoveListener);
+    spsService.start(true, StoragePolicySatisfierMode.EXTERNAL);
     return cluster;
   }
 
+  public void restartNamenode() throws IOException{
+    BlockManager blkMgr = getCluster().getNameNode().getNamesystem()
+        .getBlockManager();
+    SPSService spsService = blkMgr.getSPSService();
+    spsService.stopGracefully();
+
+    getCluster().restartNameNodes();
+    getCluster().waitActive();
+    blkMgr = getCluster().getNameNode().getNamesystem()
+        .getBlockManager();
+    spsService = blkMgr.getSPSService();
+    spsService.stopGracefully();
+
+    // TODO: Since External is not fully implemented, just used INTERNAL now.
+    // Need to set External context here.
+    IntraSPSNameNodeContext context = new IntraSPSNameNodeContext(
+        getCluster().getNameNode().getNamesystem(), blkMgr,
+        blkMgr.getSPSService()) {
+      public boolean isRunning() {
+        return true;
+      };
+    };
+    ExternalBlockMovementListener blkMoveListener =
+        new ExternalBlockMovementListener();
+    ExternalSPSBlockMoveTaskHandler externalHandler =
+        new ExternalSPSBlockMoveTaskHandler(getConf(), nnc,
+            blkMgr.getSPSService());
+    externalHandler.init();
+    spsService.init(context,
+        new ExternalSPSFileIDCollector(context, blkMgr.getSPSService()),
+        externalHandler,
+        blkMoveListener);
+    spsService.start(true, StoragePolicySatisfierMode.EXTERNAL);
+  }
+
   @Override
   public FileIdCollector createFileIdCollector(StoragePolicySatisfier sps,
       Context ctxt) {
-    return new ExternalSPSFileIDCollector(ctxt, sps, 5);
+    return new ExternalSPSFileIDCollector(ctxt, sps);
   }
 
   private class ExternalBlockMovementListener implements BlockMovementListener {
@@ -148,4 +196,18 @@ public class TestExternalStoragePolicySatisfier
       + " So, ignoring it.")
   public void testBatchProcessingForSPSDirectory() throws Exception {
   }
+
+  /**
+   * Status won't be supported for external SPS, now. So, ignoring it.
+   */
+  @Ignore("Status is not supported for external SPS. So, ignoring it.")
+  public void testStoragePolicySatisfyPathStatus() throws Exception {
+  }
+
+  /**
+   * Status won't be supported for external SPS, now. So, ignoring it.
+   */
+  @Ignore("Status is not supported for external SPS. So, ignoring it.")
+  public void testMaxRetryForFailedBlock() throws Exception {
+  }
 }
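createCluster() and restartNamenode() above duplicate the same external-SPS wiring: stop the NameNode-internal satisfier, re-init it with an external block-move handler and file-ID collector, and start it in EXTERNAL mode. A condensed sketch of that shared sequence, using only classes that appear in the diff; the helper name buildExternalSps is illustrative, not part of the patch:

    // Illustrative helper summarizing the wiring shown in the hunks above.
    private void buildExternalSps(MiniDFSCluster cluster, Configuration conf,
        NameNodeConnector nnc) throws IOException {
      BlockManager blkMgr =
          cluster.getNameNode().getNamesystem().getBlockManager();
      SPSService spsService = blkMgr.getSPSService();
      spsService.stopGracefully();  // stop the NameNode-internal instance

      // Per the TODO in the patch, an internal context stands in for a real
      // external one for now, with isRunning() pinned to true.
      IntraSPSNameNodeContext context = new IntraSPSNameNodeContext(
          cluster.getNameNode().getNamesystem(), blkMgr, spsService) {
        @Override
        public boolean isRunning() {
          return true;
        }
      };
      ExternalSPSBlockMoveTaskHandler externalHandler =
          new ExternalSPSBlockMoveTaskHandler(conf, nnc, spsService);
      externalHandler.init();
      spsService.init(context,
          new ExternalSPSFileIDCollector(context, spsService),
          externalHandler, new ExternalBlockMovementListener());
      spsService.start(true, StoragePolicySatisfierMode.EXTERNAL);
    }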
So, ignoring it.") + public void testMaxRetryForFailedBlock() throws Exception { + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java index 7e0663d..28838a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.junit.After; import org.junit.Before; @@ -48,8 +49,8 @@ public class TestStoragePolicyCommands { @Before public void clusterSetUp() throws IOException, URISyntaxException { conf = new HdfsConfiguration(); - conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, - true); + conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, + StoragePolicySatisfierMode.INTERNAL.toString()); StorageType[][] newtypes = new StorageType[][] { {StorageType.ARCHIVE, StorageType.DISK}}; cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL) http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java index 856c3ec..6a30c03 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -46,8 +47,8 @@ public class TestStoragePolicySatisfyAdminCommands { @Before public void clusterSetUp() throws IOException, URISyntaxException { conf = new HdfsConfiguration(); - conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, - true); + conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, + StoragePolicySatisfierMode.INTERNAL.toString()); StorageType[][] newtypes = new StorageType[][] { {StorageType.ARCHIVE, StorageType.DISK}}; cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL) @@ -94,16 +95,17 @@ public class TestStoragePolicySatisfyAdminCommands { final String file = "/testIsSatisfierRunningCommand"; 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
index 856c3ec..6a30c03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -46,8 +47,8 @@ public class TestStoragePolicySatisfyAdminCommands {
   @Before
   public void clusterSetUp() throws IOException, URISyntaxException {
     conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-        true);
+    conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+        StoragePolicySatisfierMode.INTERNAL.toString());
     StorageType[][] newtypes = new StorageType[][] {
         {StorageType.ARCHIVE, StorageType.DISK}};
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL)
@@ -94,16 +95,17 @@ public class TestStoragePolicySatisfyAdminCommands {
     final String file = "/testIsSatisfierRunningCommand";
     DFSTestUtil.createFile(dfs, new Path(file), SIZE, REPL, 0);
     final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
-    DFSTestUtil.toolRun(admin, "-isSatisfierRunning", 0, "yes");
+    DFSTestUtil.toolRun(admin, "-isInternalSatisfierRunning", 0, "yes");
 
     cluster.getNameNode().reconfigureProperty(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "false");
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+        StoragePolicySatisfierMode.NONE.toString());
     cluster.waitActive();
-    DFSTestUtil.toolRun(admin, "-isSatisfierRunning", 0, "no");
+    DFSTestUtil.toolRun(admin, "-isInternalSatisfierRunning", 0, "no");
 
     // Test with unnecessary args
-    DFSTestUtil.toolRun(admin, "-isSatisfierRunning status", 1,
+    DFSTestUtil.toolRun(admin, "-isInternalSatisfierRunning status", 1,
         "Can't understand arguments: ");
   }
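The last hunk also relies on DFS_STORAGE_POLICY_SATISFIER_MODE_KEY being a reconfigurable NameNode property: switching it to NONE at runtime makes the renamed -isInternalSatisfierRunning option report "no" without a restart. A sketch of that round trip, assuming the same fixture fields (conf, cluster) as the test above; the helper name is illustrative, not part of the patch:

    // Illustrative helper mirroring the hunk above: disable the satisfier
    // at runtime and verify the change through StoragePolicyAdmin.
    private void assertSatisfierToggledOff(MiniDFSCluster cluster,
        Configuration conf) throws Exception {
      final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
      DFSTestUtil.toolRun(admin, "-isInternalSatisfierRunning", 0, "yes");

      // Reconfigure the live NameNode; no restart is needed.
      cluster.getNameNode().reconfigureProperty(
          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
          StoragePolicySatisfierMode.NONE.toString());
      cluster.waitActive();
      DFSTestUtil.toolRun(admin, "-isInternalSatisfierRunning", 0, "no");
    }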