Repository: hadoop Updated Branches: refs/heads/branch-2.7 4aea76067 -> 1e5c40b5b
HDFS-9444. Add utility to find set of available ephemeral ports to ServerSocketUtil. Contributed by Masatake Iwasaki (cherry picked from commit e9a34ae29c7390f3ffcbeee02dc5faa26fca482a) (cherry picked from commit 5f754e8638d5a35ab12765edec6561228312f71c) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e5c40b5 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e5c40b5 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e5c40b5 Branch: refs/heads/branch-2.7 Commit: 1e5c40b5b4b013bd63e3c0e060cb4c31e5e8ba82 Parents: 4aea760 Author: Brahma Reddy Battula <bra...@apache.org> Authored: Wed Sep 28 10:50:50 2016 +0530 Committer: Zhe Zhang <z...@apache.org> Committed: Tue Oct 18 10:50:19 2016 -0700 ---------------------------------------------------------------------- .../org/apache/hadoop/net/ServerSocketUtil.java | 22 +++++++++++ hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../server/namenode/ha/TestEditLogTailer.java | 39 +++++++++++++++----- 3 files changed, 54 insertions(+), 10 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e5c40b5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java ---------------------------------------------------------------------- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java index 0ce835f..b9e2c62 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java @@ -60,4 +60,26 @@ public class ServerSocketUtil { } } + /** + * Find the specified number of unique ports available. 
+ * The ports are all closed afterwards, + * so other network services started may grab those same ports. + * + * @param numPorts number of required port numbers + * @return array of available port numbers + * @throws IOException + */ + public static int[] getPorts(int numPorts) throws IOException { + ServerSocket[] sockets = new ServerSocket[numPorts]; + int[] ports = new int[numPorts]; + for (int i = 0; i < numPorts; i++) { + ServerSocket sock = new ServerSocket(0); + sockets[i] = sock; + ports[i] = sock.getLocalPort(); + } + for (ServerSocket sock : sockets) { + sock.close(); + } + return ports; + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e5c40b5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index c9333a1..b19863b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -169,6 +169,9 @@ Release 2.7.4 - UNRELEASED HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. (Vinitha Gankidi via shv) + HDFS-9444. Add utility to find set of available ephemeral ports to + ServerSocketUtil. 
(Masatake Iwasaki via Brahma Reddy Battula) + Release 2.7.3 - 2016-08-25 INCOMPATIBLE CHANGES http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e5c40b5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java index 8c61c92..ea7b00a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java @@ -19,8 +19,11 @@ package org.apache.hadoop.hdfs.server.namenode.ha; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.File; import java.io.IOException; +import java.net.BindException; import java.net.URI; import org.apache.commons.logging.impl.Log4JLogger; @@ -37,6 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage; import org.apache.hadoop.hdfs.server.namenode.NNStorage; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.Test; @@ -120,17 +124,32 @@ public class TestEditLogTailer { // Roll every 1s conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); - - // Have to specify IPC ports so the NNs can talk to each other. 
- MiniDFSNNTopology topology = new MiniDFSNNTopology() - .addNameservice(new MiniDFSNNTopology.NSConf("ns1") - .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031)) - .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032))); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) - .nnTopology(topology) - .numDataNodes(0) - .build(); + MiniDFSCluster cluster = null; + for (int i = 0; i < 5; i++) { + try { + // Have to specify IPC ports so the NNs can talk to each other. + int[] ports = ServerSocketUtil.getPorts(2); + MiniDFSNNTopology topology = new MiniDFSNNTopology() + .addNameservice(new MiniDFSNNTopology.NSConf("ns1") + .addNN(new MiniDFSNNTopology.NNConf("nn1") + .setIpcPort(ports[0])) + .addNN(new MiniDFSNNTopology.NNConf("nn2") + .setIpcPort(ports[1]))); + + cluster = new MiniDFSCluster.Builder(conf) + .nnTopology(topology) + .numDataNodes(0) + .build(); + break; + } catch (BindException e) { + // retry if race on ports given by ServerSocketUtil#getPorts + continue; + } + } + if (cluster == null) { + fail("failed to start mini cluster."); + } try { cluster.transitionToActive(activeIndex); waitForLogRollInSharedDir(cluster, 3); --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org