Author: kihwal
Date: Fri May 9 01:46:42 2014
New Revision: 1593470

URL: http://svn.apache.org/r1593470
Log:
HDFS-6329. WebHdfs does not work if HA is enabled on NN but logical URI is
not configured. Contributed by Kihwal Lee.
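For context on the log message above: the failing setup is an HA-enabled namenode whose clients are not given the logical URI. The sketch below is purely illustrative and not part of this commit; the nameservice id "ns1" and the example.com host names are made up. It shows a configuration where HA is defined for a nameservice while fs.defaultFS still names a physical namenode, which is the case this change now tolerates by deriving the client-facing address from fs.defaultFS or, failing that, from the RPC bind address.

import org.apache.hadoop.conf.Configuration;

public class HaWithoutLogicalUriExample {
  // Hypothetical example only; "ns1" and the example.com hosts are invented.
  public static Configuration build() {
    Configuration conf = new Configuration();
    // HA is enabled for the nameservice "ns1" with two namenodes...
    conf.set("dfs.nameservices", "ns1");
    conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.ns1.nn1", "nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.ns1.nn2", "nn2.example.com:8020");
    // ...but the default filesystem is not the logical URI hdfs://ns1; it
    // points at one physical namenode. Per HDFS-6329, WebHdfs did not work
    // against a namenode configured this way before this change.
    conf.set("fs.defaultFS", "hdfs://nn1.example.com:8020");
    return conf;
  }
}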
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1593470&r1=1593469&r2=1593470&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri May 9 01:46:42 2014
@@ -438,6 +438,8 @@ Release 2.5.0 - UNRELEASED
     HDFS-5381. ExtendedBlock#hashCode should use both blockId and block pool ID
     (Benoy Antony via Colin Patrick McCabe)
 
+    HDFS-6329. WebHdfs does not work if HA is enabled on NN but logical URI is
+    not configured. (kihwal)
 
 Release 2.4.1 - UNRELEASED

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1593470&r1=1593469&r2=1593470&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri May 9 01:46:42 2014
@@ -273,10 +273,11 @@ public class NameNode implements NameNod
   private JvmPauseMonitor pauseMonitor;
   private ObjectName nameNodeStatusBeanName;
   /**
-   * The service name of the delegation token issued by the namenode. It is
-   * the name service id in HA mode, or the rpc address in non-HA mode.
+   * The namenode address that clients will use to access this namenode
+   * or the name service. For HA configurations using logical URI, it
+   * will be the logical address.
    */
-  private String tokenServiceName;
+  private String clientNamenodeAddress;
 
   /** Format a new filesystem.  Destroys any filesystem that may already
    * exist at this location.  **/
@@ -319,7 +320,54 @@ public class NameNode implements NameNod
    *
    * @return The name service id in HA-mode, or the rpc address in non-HA mode
    */
-  public String getTokenServiceName() { return tokenServiceName; }
+  public String getTokenServiceName() {
+    return getClientNamenodeAddress();
+  }
+
+  /**
+   * Set the namenode address that will be used by clients to access this
+   * namenode or name service. This needs to be called before the config
+   * is overriden.
+   */
+  public void setClientNamenodeAddress(Configuration conf) {
+    String nnAddr = conf.get(FS_DEFAULT_NAME_KEY);
+    if (nnAddr == null) {
+      // default fs is not set.
+      clientNamenodeAddress = null;
+      return;
+    }
+
+    LOG.info(FS_DEFAULT_NAME_KEY + " is " + nnAddr);
+    URI nnUri = URI.create(nnAddr);
+
+    String nnHost = nnUri.getHost();
+    if (nnHost == null) {
+      clientNamenodeAddress = null;
+      return;
+    }
+
+    if (DFSUtil.getNameServiceIds(conf).contains(nnHost)) {
+      // host name is logical
+      clientNamenodeAddress = nnHost;
+    } else if (nnUri.getPort() > 0) {
+      // physical address with a valid port
+      clientNamenodeAddress = nnUri.getAuthority();
+    } else {
+      // the port is missing or 0. Figure out real bind address later.
+      clientNamenodeAddress = null;
+      return;
+    }
+    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
+        + " this namenode/service.");
+  }
+
+  /**
+   * Get the namenode address to be used by clients.
+   * @return nn address
+   */
+  public String getClientNamenodeAddress() {
+    return clientNamenodeAddress;
+  }
 
   public static InetSocketAddress getAddress(String address) {
     return NetUtils.createSocketAddr(address, DEFAULT_PORT);
@@ -535,9 +583,14 @@ public class NameNode implements NameNod
     loadNamesystem(conf);
 
     rpcServer = createRpcServer(conf);
-    final String nsId = getNameServiceId(conf);
-    tokenServiceName = HAUtil.isHAEnabled(conf, nsId) ? nsId : NetUtils
-        .getHostPortString(rpcServer.getRpcAddress());
+    if (clientNamenodeAddress == null) {
+      // This is expected for MiniDFSCluster. Set it now using
+      // the RPC server's bind address.
+      clientNamenodeAddress =
+          NetUtils.getHostPortString(rpcServer.getRpcAddress());
+      LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
+          + " this namenode/service.");
+    }
     if (NamenodeRole.NAMENODE == role) {
       httpServer.setNameNodeAddress(getNameNodeAddress());
       httpServer.setFSImage(getFSImage());
@@ -683,6 +736,7 @@ public class NameNode implements NameNod
       throws IOException {
     this.conf = conf;
     this.role = role;
+    setClientNamenodeAddress(conf);
     String nsId = getNameServiceId(conf);
     String namenodeId = HAUtil.getNameNodeId(conf, nsId);
     this.haEnabled = HAUtil.isHAEnabled(conf, nsId);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1593470&r1=1593469&r2=1593470&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Fri May 9 01:46:42 2014
@@ -139,8 +139,8 @@ public class DFSTestUtil {
     String clusterId = StartupOption.FORMAT.getClusterId();
     if(clusterId == null || clusterId.isEmpty())
       StartupOption.FORMAT.setClusterId("testClusterID");
-
-    NameNode.format(conf);
+    // Use a copy of conf as it can be altered by namenode during format.
+    NameNode.format(new Configuration(conf));
   }
 
   /**

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1593470&r1=1593469&r2=1593470&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri May 9 01:46:42 2014
@@ -760,8 +760,11 @@ public class MiniDFSCluster {
 
     if (!federation && nnTopology.countNameNodes() == 1) {
       NNConf onlyNN = nnTopology.getOnlyNameNode();
-      // we only had one NN, set DEFAULT_NAME for it
-      conf.set(FS_DEFAULT_NAME_KEY, "127.0.0.1:" + onlyNN.getIpcPort());
+      // we only had one NN, set DEFAULT_NAME for it. If not explicitly
+      // specified initially, the port will be 0 to make NN bind to any
+      // available port. It will be set to the right address after
+      // NN is started.
+      conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:" + onlyNN.getIpcPort());
     }
 
     List<String> allNsIds = Lists.newArrayList();
@@ -777,6 +780,7 @@ public class MiniDFSCluster {
     int nnCounter = 0;
     for (MiniDFSNNTopology.NSConf nameservice : nnTopology.getNameservices()) {
       String nsId = nameservice.getId();
+      String lastDefaultFileSystem = null;
 
       Preconditions.checkArgument(
           !federation || nsId != null,
@@ -860,10 +864,19 @@ public class MiniDFSCluster {
 
       for (NNConf nn : nameservice.getNNs()) {
         initNameNodeConf(conf, nsId, nn.getNnId(), manageNameDfsDirs,
            enableManagedDfsDirsRedundancy, nnCounter);
-        createNameNode(nnCounter++, conf, numDataNodes, false, operation,
+        createNameNode(nnCounter, conf, numDataNodes, false, operation,
            clusterId, nsId, nn.getNnId());
+        // Record the last namenode uri
+        if (nameNodes[nnCounter] != null && nameNodes[nnCounter].conf != null) {
+          lastDefaultFileSystem =
+              nameNodes[nnCounter].conf.get(FS_DEFAULT_NAME_KEY);
+        }
+        nnCounter++;
+      }
+      if (!federation && lastDefaultFileSystem != null) {
+        // Set the default file system to the actual bind address of NN.
+        conf.set(FS_DEFAULT_NAME_KEY, lastDefaultFileSystem);
       }
-    }
   }
 
@@ -977,7 +990,8 @@ public class MiniDFSCluster {
       operation.setClusterId(clusterId);
     }
 
-    // Start the NameNode
+    // Start the NameNode after saving the default file system.
+    String originalDefaultFs = conf.get(FS_DEFAULT_NAME_KEY);
     String[] args = createArgs(operation);
     NameNode nn = NameNode.createNameNode(args, conf);
     if (operation == StartupOption.RECOVER) {
@@ -1001,6 +1015,12 @@
                            DFS_NAMENODE_HTTP_ADDRESS_KEY);
     nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, operation,
         new Configuration(conf));
+    // Restore the default fs name
+    if (originalDefaultFs == null) {
+      conf.set(FS_DEFAULT_NAME_KEY, "");
+    } else {
+      conf.set(FS_DEFAULT_NAME_KEY, originalDefaultFs);
+    }
   }
 
   /**

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java?rev=1593470&r1=1593469&r2=1593470&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java Fri May 9 01:46:42 2014
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
@@ -37,6 +38,7 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.net.URI;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
@@ -119,6 +121,8 @@ public class TestWebHDFSForHA {
   @Test
   public void testFailoverAfterOpen() throws IOException {
     Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
+        "://" + LOGICAL_NAME);
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
     final Path p = new Path("/test");
@@ -152,4 +156,4 @@ public class TestWebHDFSForHA {
       }
     }
   }
-}
\ No newline at end of file
+}
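The heart of the change is the selection rule added in NameNode#setClientNamenodeAddress above. The following standalone helper is a paraphrase of that rule for readability, not the committed code: it takes the fs.defaultFS value and the set of configured nameservice ids as plain arguments (the real method reads them from the Configuration via FS_DEFAULT_NAME_KEY and DFSUtil.getNameServiceIds), and the class and method names here are invented for this sketch.

import java.net.URI;
import java.util.Set;

public class ClientAddressRule {
  /**
   * Mirrors the logic in NameNode#setClientNamenodeAddress: prefer the
   * logical nameservice id, then a physical authority with an explicit port,
   * otherwise return null so the caller falls back to the RPC bind address.
   */
  static String chooseClientAddress(String defaultFs, Set<String> nameServiceIds) {
    if (defaultFs == null) {
      return null;                 // fs.defaultFS is not set
    }
    URI uri = URI.create(defaultFs);
    String host = uri.getHost();
    if (host == null) {
      return null;                 // no usable host in the URI
    }
    if (nameServiceIds.contains(host)) {
      return host;                 // logical (HA) URI: use the nameservice id
    }
    if (uri.getPort() > 0) {
      return uri.getAuthority();   // physical address with a valid port
    }
    return null;                   // port missing or 0; resolved later
  }
}

Under this rule, hdfs://ns1 yields "ns1", hdfs://nn1.example.com:8020 yields "nn1.example.com:8020", and a port of 0 (the MiniDFSCluster case) yields null, after which the namenode fills in its actual RPC bind address once the RPC server has been created.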