hadoop git commit: HDFS-13048. LowRedundancyReplicatedBlocks metric can be negative
Repository: hadoop Updated Branches: refs/heads/trunk b0627c891 -> 4aef8bd2e HDFS-13048. LowRedundancyReplicatedBlocks metric can be negative Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4aef8bd2 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4aef8bd2 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4aef8bd2 Branch: refs/heads/trunk Commit: 4aef8bd2efd68bf96c077ddda1538dcd5691b437 Parents: b0627c8 Author: Akira AjisakaAuthored: Fri Feb 2 14:33:56 2018 +0900 Committer: Akira Ajisaka Committed: Fri Feb 2 14:34:07 2018 +0900 -- .../server/blockmanagement/LowRedundancyBlocks.java | 2 +- .../TestLowRedundancyBlockQueues.java| 15 +++ 2 files changed, 16 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aef8bd2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java index 347d606..e3f228d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java @@ -365,7 +365,7 @@ class LowRedundancyBlocks implements Iterable { NameNode.blockStateChangeLog.debug( "BLOCK* NameSystem.LowRedundancyBlock.remove: Removing block" + " {} from priority queue {}", block, i); - decrementBlockStat(block, priLevel, oldExpectedReplicas); + decrementBlockStat(block, i, oldExpectedReplicas); return true; } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aef8bd2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java index 2b28f1e..0681a0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java @@ -124,6 +124,21 @@ public class TestLowRedundancyBlockQueues { } @Test + public void testRemoveWithWrongPriority() { +final LowRedundancyBlocks queues = new LowRedundancyBlocks(); +final BlockInfo corruptBlock = genBlockInfo(1); +assertAdded(queues, corruptBlock, 0, 0, 3); +assertInLevel(queues, corruptBlock, +LowRedundancyBlocks.QUEUE_WITH_CORRUPT_BLOCKS); +verifyBlockStats(queues, 0, 1, 0, 0, 0); + +// Remove with wrong priority +queues.remove(corruptBlock, LowRedundancyBlocks.QUEUE_LOW_REDUNDANCY); +// Verify the number of corrupt block is decremented +verifyBlockStats(queues, 0, 0, 0, 0, 0); + } + + @Test public void testStripedBlockPriorities() throws Throwable { int dataBlkNum = ecPolicy.getNumDataUnits(); int parityBlkNUm = ecPolicy.getNumParityUnits(); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
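Note on this change: LowRedundancyBlocks#remove(block, priLevel) falls back to scanning every priority queue when the block is not found at the hinted priority. The old code then decremented the statistics for the hinted level (priLevel) instead of the level where the block was actually found (i), which could drive a counter such as LowRedundancyReplicatedBlocks below zero. The following self-contained sketch only illustrates the corrected bookkeeping; the class, field, and method names are simplified stand-ins, not the actual LowRedundancyBlocks implementation.

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Simplified model of per-priority queues with per-priority counters.
    class PriorityQueues<B> {
      private final List<Set<B>> queues = new ArrayList<>();
      private final int[] sizePerPriority;

      PriorityQueues(int levels) {
        for (int i = 0; i < levels; i++) {
          queues.add(new HashSet<>());
        }
        sizePerPriority = new int[levels];
      }

      void add(B block, int priLevel) {
        if (queues.get(priLevel).add(block)) {
          sizePerPriority[priLevel]++;   // increment the counter of the queue actually used
        }
      }

      /** Remove a block whose priority hint may have been computed from stale state. */
      boolean remove(B block, int priLevel) {
        // Fast path: the hint is correct.
        if (queues.get(priLevel).remove(block)) {
          sizePerPriority[priLevel]--;
          return true;
        }
        // Fallback: scan every queue, as LowRedundancyBlocks#remove does.
        for (int i = 0; i < queues.size(); i++) {
          if (queues.get(i).remove(block)) {
            // HDFS-13048: decrement the counter of queue i, where the block really was,
            // not of priLevel; using priLevel here is what made the metric go negative.
            sizePerPriority[i]--;
            return true;
          }
        }
        return false;
      }
    }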
hadoop git commit: HDFS-13048. LowRedundancyReplicatedBlocks metric can be negative
Repository: hadoop Updated Branches: refs/heads/branch-3.0 abdf8abf1 -> 01661d4f9 HDFS-13048. LowRedundancyReplicatedBlocks metric can be negative (cherry picked from commit 4aef8bd2efd68bf96c077ddda1538dcd5691b437) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01661d4f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01661d4f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01661d4f Branch: refs/heads/branch-3.0 Commit: 01661d4f903ce5358573ae9e823b8a9dbe543eef Parents: abdf8ab Author: Akira AjisakaAuthored: Fri Feb 2 14:33:56 2018 +0900 Committer: Akira Ajisaka Committed: Fri Feb 2 14:35:11 2018 +0900 -- .../server/blockmanagement/LowRedundancyBlocks.java | 2 +- .../TestLowRedundancyBlockQueues.java| 15 +++ 2 files changed, 16 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/01661d4f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java index 347d606..e3f228d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java @@ -365,7 +365,7 @@ class LowRedundancyBlocks implements Iterable { NameNode.blockStateChangeLog.debug( "BLOCK* NameSystem.LowRedundancyBlock.remove: Removing block" + " {} from priority queue {}", block, i); - decrementBlockStat(block, priLevel, oldExpectedReplicas); + decrementBlockStat(block, i, oldExpectedReplicas); return true; } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/01661d4f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java index 2b28f1e..0681a0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java @@ -124,6 +124,21 @@ public class TestLowRedundancyBlockQueues { } @Test + public void testRemoveWithWrongPriority() { +final LowRedundancyBlocks queues = new LowRedundancyBlocks(); +final BlockInfo corruptBlock = genBlockInfo(1); +assertAdded(queues, corruptBlock, 0, 0, 3); +assertInLevel(queues, corruptBlock, +LowRedundancyBlocks.QUEUE_WITH_CORRUPT_BLOCKS); +verifyBlockStats(queues, 0, 1, 0, 0, 0); + +// Remove with wrong priority +queues.remove(corruptBlock, LowRedundancyBlocks.QUEUE_LOW_REDUNDANCY); +// Verify the number of corrupt block is decremented +verifyBlockStats(queues, 0, 0, 0, 0, 0); + } + + @Test public void testStripedBlockPriorities() throws Throwable { int dataBlkNum = ecPolicy.getNumDataUnits(); int parityBlkNUm = ecPolicy.getNumParityUnits(); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: 
common-commits-h...@hadoop.apache.org
hadoop git commit: YARN-7840. Update PB for prefix support of node attributes. Contributed by Naganarasimha G R.
Repository: hadoop Updated Branches: refs/heads/YARN-3409 0f3f89ac9 -> 4f6a6ff52 YARN-7840. Update PB for prefix support of node attributes. Contributed by Naganarasimha G R. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f6a6ff5 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f6a6ff5 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f6a6ff5 Branch: refs/heads/YARN-3409 Commit: 4f6a6ff52e5587ac20b6ff98120b325da980c993 Parents: 0f3f89a Author: bibinchundattAuthored: Fri Feb 2 10:31:00 2018 +0530 Committer: bibinchundatt Committed: Fri Feb 2 10:31:00 2018 +0530 -- .../hadoop/yarn/api/records/NodeAttribute.java | 22 ++- .../src/main/proto/yarn_protos.proto| 7 ++-- .../records/impl/pb/NodeAttributePBImpl.java| 39 +--- .../hadoop/yarn/api/TestPBImplRecords.java | 7 ++-- 4 files changed, 61 insertions(+), 14 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f6a6ff5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java index 13081f3..01c70b2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java @@ -37,15 +37,27 @@ import org.apache.hadoop.yarn.util.Records; * Its not compulsory for all the attributes to have value, empty string is the * default value of the NodeAttributeType.STRING * - * + * + * Node Attribute Prefix is used as namespace to segregate the attributes. 
+ * */ @Public @Unstable public abstract class NodeAttribute { + public static final String DEFAULT_PREFIX = ""; + public static NodeAttribute newInstance(String attributeName, NodeAttributeType attributeType, String attributeValue) { +return newInstance(DEFAULT_PREFIX, attributeName, attributeType, +attributeValue); + } + + public static NodeAttribute newInstance(String attributePrefix, + String attributeName, NodeAttributeType attributeType, + String attributeValue) { NodeAttribute nodeAttribute = Records.newRecord(NodeAttribute.class); +nodeAttribute.setAttributePrefix(attributePrefix); nodeAttribute.setAttributeName(attributeName); nodeAttribute.setAttributeType(attributeType); nodeAttribute.setAttributeValue(attributeValue); @@ -54,6 +66,14 @@ public abstract class NodeAttribute { @Public @Unstable + public abstract String getAttributePrefix(); + + @Public + @Unstable + public abstract void setAttributePrefix(String attributePrefix); + + @Public + @Unstable public abstract String getAttributeName(); @Public http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f6a6ff5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index 8436942..e1c9db6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -375,9 +375,10 @@ enum NodeAttributeTypeProto { } message NodeAttributeProto { - optional string attributeName = 1; - optional NodeAttributeTypeProto attributeType = 2; - optional string attributeValue = 3; + optional string attributePrefix = 1; + required string attributeName = 2; + optional NodeAttributeTypeProto attributeType = 3 [default = STRING]; + optional string attributeValue = 4 [default=""]; } http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f6a6ff5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeAttributePBImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeAttributePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeAttributePBImpl.java index 11c9c48..7810939 100644 ---
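Note on this change: the new prefix acts as a namespace for node attributes, with DEFAULT_PREFIX being the empty string; the three-argument factory simply delegates to the new four-argument one. A brief, hypothetical usage of both factory methods is sketched below; the prefix and attribute values ("nm.yarn.io", "os", and so on) are illustrative assumptions, not taken from the patch.

    import org.apache.hadoop.yarn.api.records.NodeAttribute;
    import org.apache.hadoop.yarn.api.records.NodeAttributeType;

    public class NodeAttributeExample {
      public static void main(String[] args) {
        // Three-argument form: uses DEFAULT_PREFIX ("") under the hood.
        NodeAttribute plain =
            NodeAttribute.newInstance("hostname", NodeAttributeType.STRING, "host1234");

        // Explicit-prefix form introduced by YARN-7840.
        NodeAttribute prefixed = NodeAttribute.newInstance(
            "nm.yarn.io", "os", NodeAttributeType.STRING, "centos7");

        System.out.println(prefixed.getAttributePrefix() + "/" + prefixed.getAttributeName());
        System.out.println(plain.getAttributePrefix().isEmpty());  // true: default prefix
      }
    }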
hadoop git commit: HDFS-13068. RBF: Add router admin option to manage safe mode. Contributed by Yiqun Lin.
Repository: hadoop Updated Branches: refs/heads/trunk aa45faf0b -> b0627c891 HDFS-13068. RBF: Add router admin option to manage safe mode. Contributed by Yiqun Lin. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0627c89 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0627c89 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0627c89 Branch: refs/heads/trunk Commit: b0627c891b0e90e29dab2bec64a01c2c2ffe4ed0 Parents: aa45faf Author: Yiqun LinAuthored: Fri Feb 2 11:25:41 2018 +0800 Committer: Yiqun Lin Committed: Fri Feb 2 11:25:41 2018 +0800 -- ...uterAdminProtocolServerSideTranslatorPB.java | 60 .../RouterAdminProtocolTranslatorPB.java| 60 +++- .../federation/router/RouterAdminServer.java| 46 +++- .../server/federation/router/RouterClient.java | 4 ++ .../federation/router/RouterStateManager.java | 50 + .../store/protocol/EnterSafeModeRequest.java| 32 + .../store/protocol/EnterSafeModeResponse.java | 50 + .../store/protocol/GetSafeModeRequest.java | 31 .../store/protocol/GetSafeModeResponse.java | 49 + .../store/protocol/LeaveSafeModeRequest.java| 32 + .../store/protocol/LeaveSafeModeResponse.java | 50 + .../impl/pb/EnterSafeModeRequestPBImpl.java | 62 .../impl/pb/EnterSafeModeResponsePBImpl.java| 73 +++ .../impl/pb/GetSafeModeRequestPBImpl.java | 62 .../impl/pb/GetSafeModeResponsePBImpl.java | 73 +++ .../impl/pb/LeaveSafeModeRequestPBImpl.java | 62 .../impl/pb/LeaveSafeModeResponsePBImpl.java| 73 +++ .../hdfs/tools/federation/RouterAdmin.java | 75 +++- .../src/main/proto/FederationProtocol.proto | 25 +++ .../src/main/proto/RouterProtocol.proto | 15 .../src/site/markdown/HDFSCommands.md | 2 + .../src/site/markdown/HDFSRouterFederation.md | 6 +- .../federation/router/TestRouterAdminCLI.java | 48 + 23 files changed, 1036 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0627c89/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java index 415bbd9..159d5c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java @@ -23,8 +23,14 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryRequestProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryResponseProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeRequestProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto; +import 
org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeRequestProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeResponseProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeRequestProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto; import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto; @@ -32,16 +38,28 @@ import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProt
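Note on this change: the diff above (truncated) adds enter/leave/get safe-mode operations to the Router admin protocol (EnterSafeModeRequest/Response, LeaveSafeModeRequest/Response, GetSafeModeRequest/Response), a RouterStateManager, and CLI wiring in RouterAdmin, plus documentation updates in HDFSCommands.md and HDFSRouterFederation.md. Based on that file list, the new admin option is presumably invoked as "hdfs dfsrouteradmin -safemode enter | leave | get"; treat the exact syntax as an assumption until checked against the full patch.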
hadoop git commit: HDFS-12942. Synchronization issue in FSDataSetImpl#moveBlock. Contributed by Ajay Kumar.
Repository: hadoop Updated Branches: refs/heads/trunk 09dd709d6 -> aa45faf0b HDFS-12942. Synchronization issue in FSDataSetImpl#moveBlock. Contributed by Ajay Kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa45faf0 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa45faf0 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa45faf0 Branch: refs/heads/trunk Commit: aa45faf0b20c922b0d147ece9fa01fb95a5b0dec Parents: 09dd709 Author: Anu EngineerAuthored: Thu Feb 1 18:03:01 2018 -0800 Committer: Anu Engineer Committed: Thu Feb 1 18:03:01 2018 -0800 -- .../datanode/fsdataset/impl/FsDatasetImpl.java | 89 +--- .../fsdataset/impl/TestFsDatasetImpl.java | 103 ++- 2 files changed, 175 insertions(+), 17 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa45faf0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index e0f7809..8e7884d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -971,24 +971,72 @@ class FsDatasetImpl implements FsDatasetSpi { * @throws IOException */ private ReplicaInfo moveBlock(ExtendedBlock block, ReplicaInfo replicaInfo, -FsVolumeReference volumeRef) throws - IOException { + FsVolumeReference volumeRef) throws IOException { +ReplicaInfo newReplicaInfo = copyReplicaToVolume(block, replicaInfo, +volumeRef); +finalizeNewReplica(newReplicaInfo, block); +removeOldReplica(replicaInfo, newReplicaInfo, block.getBlockPoolId()); +return newReplicaInfo; + } + + /** + * Cleanup the replicaInfo object passed. + * + * @param bpid - block pool id + * @param replicaInfo- ReplicaInfo + */ + private void cleanupReplica(String bpid, ReplicaInfo replicaInfo) { +if (replicaInfo.deleteBlockData() || !replicaInfo.blockDataExists()) { + FsVolumeImpl volume = (FsVolumeImpl) replicaInfo.getVolume(); + volume.onBlockFileDeletion(bpid, replicaInfo.getBytesOnDisk()); + if (replicaInfo.deleteMetadata() || !replicaInfo.metadataExists()) { +volume.onMetaFileDeletion(bpid, replicaInfo.getMetadataLength()); + } +} + } + /** + * Create a new temporary replica of replicaInfo object in specified volume. + * + * @param block - Extended Block + * @param replicaInfo - ReplicaInfo + * @param volumeRef - Volume Ref - Closed by caller. + * @return newReplicaInfo new replica object created in specified volume. + * @throws IOException + */ + @VisibleForTesting + ReplicaInfo copyReplicaToVolume(ExtendedBlock block, ReplicaInfo replicaInfo, + FsVolumeReference volumeRef) throws IOException { FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume(); // Copy files to temp dir first ReplicaInfo newReplicaInfo = targetVolume.moveBlockToTmpLocation(block, replicaInfo, smallBufferSize, conf); +return newReplicaInfo; + } + /** + * Finalizes newReplica by calling finalizeReplica internally. 
+ * + * @param newReplicaInfo - ReplicaInfo + * @param block - Extended Block + * @throws IOException + */ + @VisibleForTesting + void finalizeNewReplica(ReplicaInfo newReplicaInfo, + ExtendedBlock block) throws IOException { // Finalize the copied files -newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo); -try (AutoCloseableLock lock = datasetLock.acquire()) { - // Increment numBlocks here as this block moved without knowing to BPS +try { + String bpid = block.getBlockPoolId(); + finalizeReplica(bpid, newReplicaInfo); FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume(); - volume.incrNumBlocks(block.getBlockPoolId()); + volume.incrNumBlocks(bpid); +} catch (IOException ioe) { + // Cleanup block data and metadata + // Decrement of dfsUsed and noOfBlocks for volume not required + newReplicaInfo.deleteBlockData(); + newReplicaInfo.deleteMetadata(); + throw ioe; } - -removeOldReplica(replicaInfo, newReplicaInfo, block.getBlockPoolId()); -return newReplicaInfo; }
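Note on this change: the refactoring splits moveBlock into a copy step (copyReplicaToVolume), a finalize step (finalizeNewReplica) that deletes the partially created copy if finalization fails, and removal of the old replica, so a failure never touches the source replica or its usage counters. The generic sketch below only illustrates that copy/finalize/cleanup ordering; the types and method names are illustrative, not FsDatasetImpl's.

    import java.io.IOException;

    interface Replica {
      void deleteBlockData();
      void deleteMetadata();
    }

    abstract class MoveSketch {
      abstract Replica copyToTargetVolume() throws IOException;   // temp copy of the block
      abstract void finalizeReplica(Replica copy) throws IOException;
      abstract void removeOldReplica();

      Replica move() throws IOException {
        Replica copy = copyToTargetVolume();
        try {
          finalizeReplica(copy);
        } catch (IOException ioe) {
          // On failure, drop the partially finalized copy; the original replica is
          // still intact, so no usage counters need to be rolled back.
          copy.deleteBlockData();
          copy.deleteMetadata();
          throw ioe;
        }
        removeOldReplica();
        return copy;
      }
    }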
hadoop git commit: HADOOP-15197. Remove tomcat from the Hadoop-auth test bundle.
Repository: hadoop Updated Branches: refs/heads/branch-3.0.1 7491aaeb8 -> 7142d8734 HADOOP-15197. Remove tomcat from the Hadoop-auth test bundle. (cherry picked from commit 09dd709d6e4022cd68187c4da1483d98ffc7e15a) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7142d873 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7142d873 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7142d873 Branch: refs/heads/branch-3.0.1 Commit: 7142d87344a91468913558f7a9ca773e02a01a8d Parents: 7491aae Author: Xiao ChenAuthored: Thu Feb 1 15:33:36 2018 -0800 Committer: Xiao Chen Committed: Thu Feb 1 15:35:40 2018 -0800 -- hadoop-common-project/hadoop-auth/pom.xml | 10 .../client/AuthenticatorTestCase.java | 51 +--- .../client/TestKerberosAuthenticator.java | 41 ++-- hadoop-project/pom.xml | 10 4 files changed, 16 insertions(+), 96 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/7142d873/hadoop-common-project/hadoop-auth/pom.xml -- diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 2cf8c7d..263ee81 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -66,16 +66,6 @@ jetty-servlet test - - org.apache.tomcat.embed - tomcat-embed-core - test - - - org.apache.tomcat.embed - tomcat-embed-logging-juli - test - javax.servlet javax.servlet-api http://git-wip-us.apache.org/repos/asf/hadoop/blob/7142d873/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java -- diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java index 8b9d45e..969cd7b 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java @@ -13,9 +13,6 @@ */ package org.apache.hadoop.security.authentication.client; -import org.apache.catalina.deploy.FilterDef; -import org.apache.catalina.deploy.FilterMap; -import org.apache.catalina.startup.Tomcat; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.http.HttpResponse; import org.apache.http.auth.AuthScope; @@ -45,7 +42,6 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.BufferedReader; import java.io.ByteArrayInputStream; -import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -65,18 +61,12 @@ public class AuthenticatorTestCase { private Server server; private String host = null; private int port = -1; - private boolean useTomcat = false; - private Tomcat tomcat = null; ServletContextHandler context; private static Properties authenticatorConfig; public AuthenticatorTestCase() {} - public AuthenticatorTestCase(boolean useTomcat) { -this.useTomcat = useTomcat; - } - protected static void setAuthenticationHandlerConfig(Properties config) { authenticatorConfig = config; } @@ -120,8 +110,7 @@ public class AuthenticatorTestCase { } protected void start() throws Exception { -if (useTomcat) startTomcat(); -else startJetty(); +startJetty(); } protected void startJetty() throws 
Exception { @@ -142,32 +131,8 @@ public class AuthenticatorTestCase { System.out.println("Running embedded servlet container at: http://; + host + ":" + port); } - protected void startTomcat() throws Exception { -tomcat = new Tomcat(); -File base = new File(System.getProperty("java.io.tmpdir")); -org.apache.catalina.Context ctx = - tomcat.addContext("/foo",base.getAbsolutePath()); -FilterDef fd = new FilterDef(); -fd.setFilterClass(TestFilter.class.getName()); -fd.setFilterName("TestFilter"); -FilterMap fm = new FilterMap(); -fm.setFilterName("TestFilter"); -fm.addURLPattern("/*"); -fm.addServletName("/bar"); -ctx.addFilterDef(fd); -ctx.addFilterMap(fm); -tomcat.addServlet(ctx, "/bar", TestServlet.class.getName()); -ctx.addServletMapping("/bar", "/bar"); -host =
hadoop git commit: HADOOP-15197. Remove tomcat from the Hadoop-auth test bundle.
Repository: hadoop Updated Branches: refs/heads/branch-3.0 992d7b573 -> abdf8abf1 HADOOP-15197. Remove tomcat from the Hadoop-auth test bundle. (cherry picked from commit 09dd709d6e4022cd68187c4da1483d98ffc7e15a) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abdf8abf Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abdf8abf Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abdf8abf Branch: refs/heads/branch-3.0 Commit: abdf8abf1a4a7f43d020c7ada48b90f695bc561d Parents: 992d7b5 Author: Xiao ChenAuthored: Thu Feb 1 15:33:36 2018 -0800 Committer: Xiao Chen Committed: Thu Feb 1 15:35:27 2018 -0800 -- hadoop-common-project/hadoop-auth/pom.xml | 10 .../client/AuthenticatorTestCase.java | 51 +--- .../client/TestKerberosAuthenticator.java | 41 ++-- hadoop-project/pom.xml | 10 4 files changed, 16 insertions(+), 96 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/abdf8abf/hadoop-common-project/hadoop-auth/pom.xml -- diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 2cf8c7d..263ee81 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -66,16 +66,6 @@ jetty-servlet test - - org.apache.tomcat.embed - tomcat-embed-core - test - - - org.apache.tomcat.embed - tomcat-embed-logging-juli - test - javax.servlet javax.servlet-api http://git-wip-us.apache.org/repos/asf/hadoop/blob/abdf8abf/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java -- diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java index 8b9d45e..969cd7b 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java @@ -13,9 +13,6 @@ */ package org.apache.hadoop.security.authentication.client; -import org.apache.catalina.deploy.FilterDef; -import org.apache.catalina.deploy.FilterMap; -import org.apache.catalina.startup.Tomcat; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.http.HttpResponse; import org.apache.http.auth.AuthScope; @@ -45,7 +42,6 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.BufferedReader; import java.io.ByteArrayInputStream; -import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -65,18 +61,12 @@ public class AuthenticatorTestCase { private Server server; private String host = null; private int port = -1; - private boolean useTomcat = false; - private Tomcat tomcat = null; ServletContextHandler context; private static Properties authenticatorConfig; public AuthenticatorTestCase() {} - public AuthenticatorTestCase(boolean useTomcat) { -this.useTomcat = useTomcat; - } - protected static void setAuthenticationHandlerConfig(Properties config) { authenticatorConfig = config; } @@ -120,8 +110,7 @@ public class AuthenticatorTestCase { } protected void start() throws Exception { -if (useTomcat) startTomcat(); -else startJetty(); +startJetty(); } protected void startJetty() throws 
Exception { @@ -142,32 +131,8 @@ public class AuthenticatorTestCase { System.out.println("Running embedded servlet container at: http://; + host + ":" + port); } - protected void startTomcat() throws Exception { -tomcat = new Tomcat(); -File base = new File(System.getProperty("java.io.tmpdir")); -org.apache.catalina.Context ctx = - tomcat.addContext("/foo",base.getAbsolutePath()); -FilterDef fd = new FilterDef(); -fd.setFilterClass(TestFilter.class.getName()); -fd.setFilterName("TestFilter"); -FilterMap fm = new FilterMap(); -fm.setFilterName("TestFilter"); -fm.addURLPattern("/*"); -fm.addServletName("/bar"); -ctx.addFilterDef(fd); -ctx.addFilterMap(fm); -tomcat.addServlet(ctx, "/bar", TestServlet.class.getName()); -ctx.addServletMapping("/bar", "/bar"); -host =
hadoop git commit: YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong)
Repository: hadoop Updated Branches: refs/heads/YARN-7402 1702dfa7f -> fd03fd45d YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd03fd45 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd03fd45 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd03fd45 Branch: refs/heads/YARN-7402 Commit: fd03fd45df84b45aefee347a6c1f89a05cd7fe68 Parents: 1702dfa Author: Botong HuangAuthored: Thu Feb 1 14:43:48 2018 -0800 Committer: Botong Huang Committed: Thu Feb 1 14:43:48 2018 -0800 -- .../dev-support/findbugs-exclude.xml| 5 + .../hadoop/yarn/conf/YarnConfiguration.java | 18 +++ .../src/main/resources/yarn-default.xml | 24 .../store/impl/MemoryFederationStateStore.java | 13 ++ .../utils/FederationStateStoreFacade.java | 41 ++- .../GlobalPolicyGenerator.java | 92 ++- .../subclustercleaner/SubClusterCleaner.java| 109 + .../subclustercleaner/package-info.java | 19 +++ .../TestSubClusterCleaner.java | 118 +++ 9 files changed, 409 insertions(+), 30 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd03fd45/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index 6a10312..d4ab8f5 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -380,6 +380,11 @@ + + + + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd03fd45/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 271b666..eabe413 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -3160,6 +3160,24 @@ public class YarnConfiguration extends Configuration { public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED = false; + private static final String FEDERATION_GPG_PREFIX = + FEDERATION_PREFIX + "gpg."; + + // The number of threads to use for the GPG scheduled executor service + public static final String GPG_SCHEDULED_EXECUTOR_THREADS = + FEDERATION_GPG_PREFIX + "scheduled.executor.threads"; + public static final int DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS = 10; + + // The interval at which the subcluster cleaner runs, -1 means disabled + public static final String GPG_SUBCLUSTER_CLEANER_INTERVAL_MS = + FEDERATION_GPG_PREFIX + "subcluster.cleaner.interval-ms"; + public static final long DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS = -1; + + // The expiration time for a subcluster heartbeat, default is 30 minutes + public static final String GPG_SUBCLUSTER_EXPIRATION_MS = + FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms"; + public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 180; + // Other Configs http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd03fd45/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 017799a..899c210 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -3403,6 +3403,30 @@ + The number of threads to use for the GPG scheduled executor service. + +yarn.federation.gpg.scheduled.executor.threads +10 + + + + + The interval at which the subcluster cleaner runs, -1 means disabled. + +yarn.federation.gpg.subcluster.cleaner.interval-ms +-1 + + + + +
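Note on this change: the YarnConfiguration and yarn-default.xml additions introduce three GPG settings: the scheduled-executor thread count, the subcluster cleaner interval (disabled at -1 by default), and the subcluster heartbeat expiration (30 minutes by default). A small, hypothetical snippet showing how these could be set programmatically is below; the property constants come from the diff above, but the specific values are examples only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class GpgCleanerConfigExample {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Run the subcluster cleaner every 5 minutes (-1, the default, disables it).
        conf.setLong(YarnConfiguration.GPG_SUBCLUSTER_CLEANER_INTERVAL_MS, 5 * 60 * 1000L);
        // Consider a subcluster lost after 30 minutes without a heartbeat.
        conf.setLong(YarnConfiguration.GPG_SUBCLUSTER_EXPIRATION_MS, 30 * 60 * 1000L);
        // Threads backing the GPG scheduled executor service.
        conf.setInt(YarnConfiguration.GPG_SCHEDULED_EXECUTOR_THREADS, 10);
        System.out.println(conf.get("yarn.federation.gpg.subcluster.cleaner.interval-ms"));
      }
    }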
hadoop git commit: HDFS-13062. Provide support for JN to use separate journal disk per namespace. Contributed by Bharat Viswanadham.
Repository: hadoop Updated Branches: refs/heads/branch-3.0 784b45412 -> 992d7b573 HDFS-13062. Provide support for JN to use separate journal disk per namespace. Contributed by Bharat Viswanadham. (cherry picked from commit dd50f53997239bf9078481cf46592ca3e41520b5) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/992d7b57 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/992d7b57 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/992d7b57 Branch: refs/heads/branch-3.0 Commit: 992d7b5739e195b5eabd33f38e2566a1e04cbe2e Parents: 784b454 Author: Hanisha KoneruAuthored: Wed Jan 31 16:34:48 2018 -0800 Committer: Hanisha Koneru Committed: Thu Feb 1 12:32:47 2018 -0800 -- .../hdfs/qjournal/server/JournalNode.java | 129 ++-- .../hdfs/qjournal/server/TestJournalNode.java | 148 --- 2 files changed, 211 insertions(+), 66 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/992d7b57/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java index 0954eaf..c772dfc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java @@ -17,18 +17,10 @@ */ package org.apache.hadoop.hdfs.qjournal.server; -import static org.apache.hadoop.util.ExitUtil.terminate; - -import java.io.File; -import java.io.FileFilter; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.HashMap; -import java.util.Map; - -import javax.management.ObjectName; - import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -47,14 +39,22 @@ import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.tracing.TraceUtils; import org.apache.hadoop.util.DiskChecker; +import static org.apache.hadoop.util.ExitUtil.terminate; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.htrace.core.Tracer; import org.eclipse.jetty.util.ajax.JSON; -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; +import javax.management.ObjectName; +import java.io.File; +import java.io.FileFilter; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; /** * The JournalNode is a daemon which allows namenodes using @@ -74,7 +74,7 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean { .newHashMap(); private ObjectName journalNodeInfoBeanName; private String httpServerURI; - private File localDir; + private final ArrayList localDir = Lists.newArrayList(); Tracer tracer; static { @@ -94,11 +94,10 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean { Journal journal = journalsById.get(jid); if (journal == null) { - File logDir = 
getLogDir(jid); - LOG.info("Initializing journal in directory " + logDir); + File logDir = getLogDir(jid, nameServiceId); + LOG.info("Initializing journal in directory " + logDir); journal = new Journal(conf, logDir, jid, startOpt, new ErrorReporter()); journalsById.put(jid, journal); - // Start SyncJouranl thread, if JournalNode Sync is enabled if (conf.getBoolean( DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_KEY, @@ -148,9 +147,34 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean { @Override public void setConf(Configuration conf) { this.conf = conf; -this.localDir = new File( -conf.get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, -DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_DEFAULT).trim()); + +String journalNodeDir = null; +Collection nameserviceIds; + +nameserviceIds = conf.getTrimmedStringCollection( +DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY); + +if (nameserviceIds.size() == 0) { + nameserviceIds =
hadoop git commit: HDFS-13062. Provide support for JN to use separate journal disk per namespace. Contributed by Bharat Viswanadham.
Repository: hadoop Updated Branches: refs/heads/trunk b3ae11d59 -> dd50f5399 HDFS-13062. Provide support for JN to use separate journal disk per namespace. Contributed by Bharat Viswanadham. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd50f539 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd50f539 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd50f539 Branch: refs/heads/trunk Commit: dd50f53997239bf9078481cf46592ca3e41520b5 Parents: b3ae11d Author: Hanisha KoneruAuthored: Wed Jan 31 16:34:48 2018 -0800 Committer: Hanisha Koneru Committed: Thu Feb 1 12:28:17 2018 -0800 -- .../hdfs/qjournal/server/JournalNode.java | 129 ++-- .../hdfs/qjournal/server/TestJournalNode.java | 148 --- 2 files changed, 211 insertions(+), 66 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd50f539/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java index 0954eaf..c772dfc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java @@ -17,18 +17,10 @@ */ package org.apache.hadoop.hdfs.qjournal.server; -import static org.apache.hadoop.util.ExitUtil.terminate; - -import java.io.File; -import java.io.FileFilter; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.HashMap; -import java.util.Map; - -import javax.management.ObjectName; - import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -47,14 +39,22 @@ import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.tracing.TraceUtils; import org.apache.hadoop.util.DiskChecker; +import static org.apache.hadoop.util.ExitUtil.terminate; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.htrace.core.Tracer; import org.eclipse.jetty.util.ajax.JSON; -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; +import javax.management.ObjectName; +import java.io.File; +import java.io.FileFilter; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; /** * The JournalNode is a daemon which allows namenodes using @@ -74,7 +74,7 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean { .newHashMap(); private ObjectName journalNodeInfoBeanName; private String httpServerURI; - private File localDir; + private final ArrayList localDir = Lists.newArrayList(); Tracer tracer; static { @@ -94,11 +94,10 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean { Journal journal = journalsById.get(jid); if (journal == null) { - File logDir = getLogDir(jid); - LOG.info("Initializing journal in directory " + logDir); + File 
logDir = getLogDir(jid, nameServiceId); + LOG.info("Initializing journal in directory " + logDir); journal = new Journal(conf, logDir, jid, startOpt, new ErrorReporter()); journalsById.put(jid, journal); - // Start SyncJouranl thread, if JournalNode Sync is enabled if (conf.getBoolean( DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_KEY, @@ -148,9 +147,34 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean { @Override public void setConf(Configuration conf) { this.conf = conf; -this.localDir = new File( -conf.get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, -DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_DEFAULT).trim()); + +String journalNodeDir = null; +Collection nameserviceIds; + +nameserviceIds = conf.getTrimmedStringCollection( +DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY); + +if (nameserviceIds.size() == 0) { + nameserviceIds = conf.getTrimmedStringCollection( + DFSConfigKeys.DFS_NAMESERVICES); +
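Note on this change: JournalNode#setConf now resolves one edits directory per nameservice (reading dfs.internal.nameservices or dfs.nameservices and falling back to dfs.journalnode.edits.dir), and getLogDir takes the nameservice id. The snippet below is only a hedged illustration of how a per-nameservice layout might be configured; the ".ns1"/".ns2" key suffixes are an assumption based on the patch description and are not confirmed by the truncated diff.

    import org.apache.hadoop.conf.Configuration;

    public class JnPerNamespaceDirsExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("dfs.nameservices", "ns1,ns2");
        // Default directory, still honored when no per-nameservice override exists.
        conf.set("dfs.journalnode.edits.dir", "/data/jn");
        // Assumed per-nameservice overrides (dfs.journalnode.edits.dir.<nameserviceId>).
        conf.set("dfs.journalnode.edits.dir.ns1", "/disk1/jn");
        conf.set("dfs.journalnode.edits.dir.ns2", "/disk2/jn");
      }
    }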
hadoop git commit: HDFS-12997. Move logging to slf4j in BlockPoolSliceStorage and Storage. Contributed by Ajay Kumar.
Repository: hadoop Updated Branches: refs/heads/trunk 6ca7204ce -> b3ae11d59 HDFS-12997. Move logging to slf4j in BlockPoolSliceStorage and Storage. Contributed by Ajay Kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3ae11d5 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3ae11d5 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3ae11d5 Branch: refs/heads/trunk Commit: b3ae11d59790bb08b81848e9f944db7d3afbbd8a Parents: 6ca7204 Author: Xiaoyu YaoAuthored: Wed Jan 31 23:10:54 2018 -0800 Committer: Xiaoyu Yao Committed: Thu Feb 1 10:45:34 2018 -0800 -- .../hadoop/hdfs/qjournal/server/JNStorage.java | 9 +- .../hadoop/hdfs/server/common/Storage.java | 75 ++--- .../server/datanode/BlockPoolSliceStorage.java | 88 +++ .../hdfs/server/datanode/DataStorage.java | 112 +-- .../hadoop/hdfs/server/namenode/NNStorage.java | 61 +- .../datanode/TestBlockPoolSliceStorage.java | 14 ++- 6 files changed, 178 insertions(+), 181 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3ae11d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java index 7226cae..6bf4903 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java @@ -193,10 +193,9 @@ class JNStorage extends Storage { // /\d+/ in the regex itself. long txid = Long.parseLong(matcher.group(1)); if (txid < minTxIdToKeep) { -LOG.info("Purging no-longer needed file " + txid); +LOG.info("Purging no-longer needed file {}", txid); if (!f.delete()) { - LOG.warn("Unable to delete no-longer-needed data " + - f); + LOG.warn("Unable to delete no-longer-needed data {}", f); } break; } @@ -214,7 +213,7 @@ class JNStorage extends Storage { } setStorageInfo(nsInfo); -LOG.info("Formatting journal " + sd + " with nsid: " + getNamespaceID()); +LOG.info("Formatting journal {} with nsid: {}", sd, getNamespaceID()); // Unlock the directory before formatting, because we will // re-analyze it after format(). The analyzeStorage() call // below is reponsible for re-locking it. 
This is a no-op @@ -278,7 +277,7 @@ class JNStorage extends Storage { } public void close() throws IOException { -LOG.info("Closing journal storage for " + sd); +LOG.info("Closing journal storage for {}", sd); unlockAll(); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3ae11d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index 5409427..3dd43c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -35,8 +35,6 @@ import java.util.Properties; import java.util.concurrent.CopyOnWriteArrayList; import org.apache.commons.io.FileUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; @@ -53,7 +51,8 @@ import org.apache.hadoop.util.VersionInfo; import com.google.common.base.Charsets; import com.google.common.base.Preconditions; - +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** @@ -76,7 +75,9 @@ import com.google.common.base.Preconditions; */ @InterfaceAudience.Private public abstract class Storage extends StorageInfo { - public static final Log LOG = LogFactory.getLog(Storage.class.getName()); + + public static final Logger LOG = LoggerFactory + .getLogger(Storage.class.getName()); // last layout version that did not support upgrades public static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3; @@ -396,7 +397,7 @@ public abstract class Storage extends StorageInfo {
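Note on this change: the migration replaces commons-logging with slf4j and switches to parameterized messages, so arguments are only formatted when the log level is actually enabled. A standalone example of the pattern (not Hadoop code):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jStyleExample {
      private static final Logger LOG = LoggerFactory.getLogger(Slf4jStyleExample.class);

      public static void main(String[] args) {
        long txid = 42L;
        // Old commons-logging style: the message string is built even if INFO is disabled.
        // LOG.info("Purging no-longer needed file " + txid);
        // slf4j style: the placeholder is only substituted when INFO is enabled.
        LOG.info("Purging no-longer needed file {}", txid);
      }
    }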
[hadoop] Git Push Summary
Repository: hadoop Updated Branches: refs/heads/branch-3.0.1 [created] 7491aaeb8
hadoop git commit: HDFS-13043. RBF: Expose the state of the Routers in the federation. Contributed by Inigo Goiri.
Repository: hadoop Updated Branches: refs/heads/branch-2.9 38c3e1711 -> 6769c78cd HDFS-13043. RBF: Expose the state of the Routers in the federation. Contributed by Inigo Goiri. (cherry picked from commit 6ca7204cebf4b7060696a07b123a6dfa7d9d) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6769c78c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6769c78c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6769c78c Branch: refs/heads/branch-2.9 Commit: 6769c78cddf32eceb20cc42f7e9bd001416ed58d Parents: 38c3e17 Author: Inigo GoiriAuthored: Thu Feb 1 10:37:14 2018 -0800 Committer: Inigo Goiri Committed: Thu Feb 1 10:39:25 2018 -0800 -- .../federation/metrics/FederationMBean.java | 6 ++ .../federation/metrics/FederationMetrics.java | 71 +++- .../federation/router/FederationUtil.java | 8 +-- .../federation/store/records/RouterState.java | 8 +-- .../records/impl/pb/RouterStatePBImpl.java | 12 ++-- .../src/main/proto/FederationProtocol.proto | 2 +- .../main/webapps/router/federationhealth.html | 41 +++ .../src/main/webapps/router/federationhealth.js | 26 +++ .../metrics/TestFederationMetrics.java | 50 ++ .../federation/metrics/TestMetricsBase.java | 45 + .../store/TestStateStoreRouterState.java| 3 +- .../store/records/TestRouterState.java | 6 +- 12 files changed, 258 insertions(+), 20 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6769c78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java index cb4245a..6412398 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java @@ -47,6 +47,12 @@ public interface FederationMBean { String getMountTable(); /** + * Get the latest state of all routers. + * @return JSON with all of the known routers or null if failure. + */ + String getRouters(); + + /** * Get the total capacity of the federated cluster. * @return Total capacity of the federated cluster. 
*/ http://git-wip-us.apache.org/repos/asf/hadoop/blob/6769c78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java index 685c585..1a5a8be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.server.federation.router.Router; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; import org.apache.hadoop.hdfs.server.federation.store.MembershipStore; import org.apache.hadoop.hdfs.server.federation.store.MountTableStore; +import org.apache.hadoop.hdfs.server.federation.store.RouterStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; @@ -61,10 +62,14 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamenodeRegist import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamenodeRegistrationsResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationsRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationsResponse; import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
hadoop git commit: HDFS-13043. RBF: Expose the state of the Routers in the federation. Contributed by Inigo Goiri.
Repository: hadoop Updated Branches: refs/heads/branch-2 e5cac88c8 -> 7b8cc048c HDFS-13043. RBF: Expose the state of the Routers in the federation. Contributed by Inigo Goiri. (cherry picked from commit 6ca7204cebf4b7060696a07b123a6dfa7d9d) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b8cc048 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b8cc048 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b8cc048 Branch: refs/heads/branch-2 Commit: 7b8cc048cc0ea20e433b9476d6d196b5f4704c75 Parents: e5cac88 Author: Inigo GoiriAuthored: Thu Feb 1 10:37:14 2018 -0800 Committer: Inigo Goiri Committed: Thu Feb 1 10:38:39 2018 -0800 -- .../federation/metrics/FederationMBean.java | 6 ++ .../federation/metrics/FederationMetrics.java | 71 +++- .../federation/router/FederationUtil.java | 8 +-- .../federation/store/records/RouterState.java | 8 +-- .../records/impl/pb/RouterStatePBImpl.java | 12 ++-- .../src/main/proto/FederationProtocol.proto | 2 +- .../main/webapps/router/federationhealth.html | 41 +++ .../src/main/webapps/router/federationhealth.js | 26 +++ .../metrics/TestFederationMetrics.java | 50 ++ .../federation/metrics/TestMetricsBase.java | 45 + .../store/TestStateStoreRouterState.java| 3 +- .../store/records/TestRouterState.java | 6 +- 12 files changed, 258 insertions(+), 20 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b8cc048/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java index cb4245a..6412398 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java @@ -47,6 +47,12 @@ public interface FederationMBean { String getMountTable(); /** + * Get the latest state of all routers. + * @return JSON with all of the known routers or null if failure. + */ + String getRouters(); + + /** * Get the total capacity of the federated cluster. * @return Total capacity of the federated cluster. 
*/ http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b8cc048/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java index 685c585..1a5a8be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.server.federation.router.Router; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; import org.apache.hadoop.hdfs.server.federation.store.MembershipStore; import org.apache.hadoop.hdfs.server.federation.store.MountTableStore; +import org.apache.hadoop.hdfs.server.federation.store.RouterStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; @@ -61,10 +62,14 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamenodeRegist import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamenodeRegistrationsResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationsRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationsResponse; import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
hadoop git commit: HDFS-13043. RBF: Expose the state of the Routers in the federation. Contributed by Inigo Goiri.
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 84297f69d -> 7491aaeb8

HDFS-13043. RBF: Expose the state of the Routers in the federation. Contributed by Inigo Goiri.

(cherry picked from commit 6ca7204cebf4b7060696a07b123a6dfa7d9d)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7491aaeb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7491aaeb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7491aaeb

Branch: refs/heads/branch-3.0
Commit: 7491aaeb8f680693cd1528d3689d6039e2081a1e
Parents: 84297f6
Author: Inigo Goiri
Authored: Thu Feb 1 10:37:14 2018 -0800
Committer: Inigo Goiri
Committed: Thu Feb 1 10:37:56 2018 -0800

--
 .../federation/metrics/FederationMBean.java      |  6 ++
 .../federation/metrics/FederationMetrics.java    | 71 +++-
 .../federation/router/FederationUtil.java        |  8 +--
 .../federation/store/records/RouterState.java    |  8 +--
 .../records/impl/pb/RouterStatePBImpl.java       | 12 ++--
 .../src/main/proto/FederationProtocol.proto      |  2 +-
 .../main/webapps/router/federationhealth.html    | 41 +++
 .../src/main/webapps/router/federationhealth.js  | 26 +++
 .../metrics/TestFederationMetrics.java           | 50 ++
 .../federation/metrics/TestMetricsBase.java      | 45 +
 .../store/TestStateStoreRouterState.java         |  3 +-
 .../store/records/TestRouterState.java           |  6 +-
 12 files changed, 258 insertions(+), 20 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7491aaeb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
index cb4245a..6412398 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
@@ -47,6 +47,12 @@ public interface FederationMBean {
   String getMountTable();
 
   /**
+   * Get the latest state of all routers.
+   * @return JSON with all of the known routers or null if failure.
+   */
+  String getRouters();
+
+  /**
    * Get the total capacity of the federated cluster.
    * @return Total capacity of the federated cluster.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7491aaeb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
index 7844a2e..069bca3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.federation.router.Router;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
 import org.apache.hadoop.hdfs.server.federation.store.MembershipStore;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
+import org.apache.hadoop.hdfs.server.federation.store.RouterStore;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
@@ -64,10 +65,14 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamenodeRegist
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamenodeRegistrationsResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationsRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationsResponse;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
hadoop git commit: HDFS-13043. RBF: Expose the state of the Routers in the federation. Contributed by Inigo Goiri.
Repository: hadoop
Updated Branches:
  refs/heads/trunk 682ea21f2 -> 6ca7204ce

HDFS-13043. RBF: Expose the state of the Routers in the federation. Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ca7204c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ca7204c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ca7204c

Branch: refs/heads/trunk
Commit: 6ca7204cebf4b7060696a07b123a6dfa7d9d
Parents: 682ea21
Author: Inigo Goiri
Authored: Thu Feb 1 10:37:14 2018 -0800
Committer: Inigo Goiri
Committed: Thu Feb 1 10:37:14 2018 -0800

--
 .../federation/metrics/FederationMBean.java      |  6 ++
 .../federation/metrics/FederationMetrics.java    | 71 +++-
 .../federation/router/FederationUtil.java        |  8 +--
 .../federation/store/records/RouterState.java    |  8 +--
 .../records/impl/pb/RouterStatePBImpl.java       | 12 ++--
 .../src/main/proto/FederationProtocol.proto      |  2 +-
 .../main/webapps/router/federationhealth.html    | 41 +++
 .../src/main/webapps/router/federationhealth.js  | 26 +++
 .../metrics/TestFederationMetrics.java           | 50 ++
 .../federation/metrics/TestMetricsBase.java      | 45 +
 .../store/TestStateStoreRouterState.java         |  3 +-
 .../store/records/TestRouterState.java           |  6 +-
 12 files changed, 258 insertions(+), 20 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ca7204c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
index 8abfc6e..79fb3e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
@@ -47,6 +47,12 @@ public interface FederationMBean {
   String getMountTable();
 
   /**
+   * Get the latest state of all routers.
+   * @return JSON with all of the known routers or null if failure.
+   */
+  String getRouters();
+
+  /**
    * Get the total capacity of the federated cluster.
    * @return Total capacity of the federated cluster.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ca7204c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
index 4582825..a80c3be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.federation.router.Router;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
 import org.apache.hadoop.hdfs.server.federation.store.MembershipStore;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
+import org.apache.hadoop.hdfs.server.federation.store.RouterStore;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
@@ -64,10 +65,14 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamenodeRegist
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamenodeRegistrationsResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationsRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationsResponse;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipStats;
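Since getRouters() is a plain MBean getter, its output can be pulled from a live Router with nothing more than standard JMX. The sketch below is not part of this patch; the ObjectName is an assumption and should be verified against the Router's /jmx page, where getRouters() surfaces as the "Routers" attribute and returns the JSON described in the javadoc above (or null when the state store cannot be queried).

// Minimal sketch, not part of HDFS-13043: read the Routers attribute over JMX.
// The ObjectName below is a guess; verify it against the Router's /jmx output.
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class RouterStateReader {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Hypothetical bean name for FederationMetrics in an in-process Router.
    ObjectName bean = new ObjectName("Hadoop:service=Router,name=FederationState");
    // getRouters() is exposed as the "Routers" attribute and returns JSON,
    // or null if the state store query failed.
    String routersJson = (String) mbs.getAttribute(bean, "Routers");
    System.out.println(routersJson);
  }
}

For a Router running in another JVM, the same attribute can be fetched through javax.management.remote.JMXConnectorFactory instead of the platform MBean server.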
hadoop git commit: Revert "YARN-7677. Docker image cannot set HADOOP_CONF_DIR. Contributed by Jim Brennan"
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 4010262ae -> 84297f69d

Revert "YARN-7677. Docker image cannot set HADOOP_CONF_DIR. Contributed by Jim Brennan"

This reverts commit 874bdbc4013b72e40b64fb874e83d3d14045a93d.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84297f69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84297f69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84297f69

Branch: refs/heads/branch-3.0
Commit: 84297f69d8b5b8c125f4a0e82eb49b9121a96079
Parents: 4010262
Author: Jason Lowe
Authored: Thu Feb 1 12:17:23 2018 -0600
Committer: Jason Lowe
Committed: Thu Feb 1 12:17:23 2018 -0600

--
 .../server/nodemanager/ContainerExecutor.java    | 31 
 .../nodemanager/LinuxContainerExecutor.java      |  8 +
 .../launcher/ContainerLaunch.java                | 15 ++
 .../runtime/DefaultLinuxContainerRuntime.java    |  6 
 .../DelegatingLinuxContainerRuntime.java         | 11 +++
 .../runtime/DockerLinuxContainerRuntime.java     |  7 +
 .../runtime/ContainerRuntime.java                | 11 +++
 .../launcher/TestContainerLaunch.java            | 10 ++-
 8 files changed, 66 insertions(+), 33 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84297f69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index f5d454e..0b4dbc9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -334,6 +334,7 @@ public abstract class ContainerExecutor implements Configurable {
   public void writeLaunchEnv(OutputStream out, Map<String, String> environment,
       Map<Path, List<String>> resources, List<String> command, Path logDir,
       String user, String outFilename) throws IOException {
+    updateEnvForWhitelistVars(environment);
     ContainerLaunch.ShellScriptBuilder sb =
         ContainerLaunch.ShellScriptBuilder.create();
@@ -351,19 +352,6 @@ public abstract class ContainerExecutor implements Configurable {
       for (Map.Entry<String, String> env : environment.entrySet()) {
         sb.env(env.getKey(), env.getValue());
       }
-      // Whitelist environment variables are treated specially.
-      // Only add them if they are not already defined in the environment.
-      // Add them using special syntax to prevent them from eclipsing
-      // variables that may be set explicitly in the container image (e.g,
-      // in a docker image)
-      for(String var : whitelistVars) {
-        if (!environment.containsKey(var)) {
-          String val = getNMEnvVar(var);
-          if (val != null) {
-            sb.whitelistedEnv(var, val);
-          }
-        }
-      }
     }

     if (resources != null) {
@@ -663,6 +651,23 @@ public abstract class ContainerExecutor implements Configurable {
     }
   }

+  /**
+   * Propagate variables from the nodemanager's environment into the
+   * container's environment if unspecified by the container.
+   * @param env the environment to update
+   * @see org.apache.hadoop.yarn.conf.YarnConfiguration#NM_ENV_WHITELIST
+   */
+  protected void updateEnvForWhitelistVars(Map<String, String> env) {
+    for(String var : whitelistVars) {
+      if (!env.containsKey(var)) {
+        String val = getNMEnvVar(var);
+        if (val != null) {
+          env.put(var, val);
+        }
+      }
+    }
+  }
+
   @VisibleForTesting
   protected String getNMEnvVar(String varname) {
     return System.getenv(varname);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84297f69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
hadoop git commit: Revert "YARN-7677. Docker image cannot set HADOOP_CONF_DIR. Contributed by Jim Brennan"
Repository: hadoop
Updated Branches:
  refs/heads/trunk ae2177d29 -> 682ea21f2

Revert "YARN-7677. Docker image cannot set HADOOP_CONF_DIR. Contributed by Jim Brennan"

This reverts commit 12eaae383ad06de8f9959241b2451dec82cf9ceb.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/682ea21f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/682ea21f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/682ea21f

Branch: refs/heads/trunk
Commit: 682ea21f2bbc587e1b727b3c895c2f513a908432
Parents: ae2177d
Author: Jason Lowe
Authored: Thu Feb 1 12:14:09 2018 -0600
Committer: Jason Lowe
Committed: Thu Feb 1 12:14:09 2018 -0600

--
 .../server/nodemanager/ContainerExecutor.java    | 31 
 .../nodemanager/LinuxContainerExecutor.java      |  8 +
 .../launcher/ContainerLaunch.java                | 15 ++
 .../runtime/DefaultLinuxContainerRuntime.java    |  6 
 .../DelegatingLinuxContainerRuntime.java         | 11 +++
 .../runtime/DockerLinuxContainerRuntime.java     |  7 +
 .../runtime/ContainerRuntime.java                | 11 +++
 .../launcher/TestContainerLaunch.java            | 10 ++-
 8 files changed, 66 insertions(+), 33 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/682ea21f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 6241733..f4279a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -346,6 +346,7 @@ public abstract class ContainerExecutor implements Configurable {
   public void writeLaunchEnv(OutputStream out, Map<String, String> environment,
       Map<Path, List<String>> resources, List<String> command, Path logDir,
       String user, String outFilename) throws IOException {
+    updateEnvForWhitelistVars(environment);
     ContainerLaunch.ShellScriptBuilder sb =
         ContainerLaunch.ShellScriptBuilder.create();
@@ -363,19 +364,6 @@ public abstract class ContainerExecutor implements Configurable {
       for (Map.Entry<String, String> env : environment.entrySet()) {
         sb.env(env.getKey(), env.getValue());
       }
-      // Whitelist environment variables are treated specially.
-      // Only add them if they are not already defined in the environment.
-      // Add them using special syntax to prevent them from eclipsing
-      // variables that may be set explicitly in the container image (e.g,
-      // in a docker image)
-      for(String var : whitelistVars) {
-        if (!environment.containsKey(var)) {
-          String val = getNMEnvVar(var);
-          if (val != null) {
-            sb.whitelistedEnv(var, val);
-          }
-        }
-      }
     }

     if (resources != null) {
@@ -675,6 +663,23 @@ public abstract class ContainerExecutor implements Configurable {
     }
   }

+  /**
+   * Propagate variables from the nodemanager's environment into the
+   * container's environment if unspecified by the container.
+   * @param env the environment to update
+   * @see org.apache.hadoop.yarn.conf.YarnConfiguration#NM_ENV_WHITELIST
+   */
+  protected void updateEnvForWhitelistVars(Map<String, String> env) {
+    for(String var : whitelistVars) {
+      if (!env.containsKey(var)) {
+        String val = getNMEnvVar(var);
+        if (val != null) {
+          env.put(var, val);
+        }
+      }
+    }
+  }
+
   @VisibleForTesting
   protected String getNMEnvVar(String varname) {
     return System.getenv(varname);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/682ea21f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
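To make the restored behavior concrete: after this revert, updateEnvForWhitelistVars() again copies each whitelisted NodeManager variable directly into the container's environment whenever the container did not set it itself, so a value present only in the Docker image is eclipsed once more by the NodeManager's value (the situation YARN-7677 tried to address with the now-removed whitelistedEnv() handling). The sketch below is a self-contained illustration of that logic, not the NodeManager source; the whitelist contents are hypothetical, standing in for whatever yarn.nodemanager.env-whitelist is configured to.

// Self-contained illustration (not the NodeManager source) of the restored
// updateEnvForWhitelistVars() logic from this revert.
import java.util.HashMap;
import java.util.Map;

public class WhitelistEnvDemo {

  // Hypothetical whitelist; the real list comes from yarn.nodemanager.env-whitelist.
  private static final String[] WHITELIST = {"HADOOP_CONF_DIR", "JAVA_HOME"};

  // Copy each whitelisted variable from this process's environment into the
  // container environment, but only if the container did not define it itself.
  static void updateEnvForWhitelistVars(Map<String, String> env) {
    for (String var : WHITELIST) {
      if (!env.containsKey(var)) {
        String val = System.getenv(var);
        if (val != null) {
          env.put(var, val);
        }
      }
    }
  }

  public static void main(String[] args) {
    Map<String, String> containerEnv = new HashMap<>();
    containerEnv.put("JAVA_HOME", "/opt/jdk");   // container-specified: kept as-is
    updateEnvForWhitelistVars(containerEnv);     // HADOOP_CONF_DIR inherited from this process
    System.out.println(containerEnv);
  }
}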