Author: todd
Date: Thu Dec 15 00:42:50 2011
New Revision: 1214579

URL: http://svn.apache.org/viewvc?rev=1214579&view=rev
Log:
HDFS-2683. Authority-based lookup of proxy provider fails if path becomes canonicalized. Contributed by Todd Lipcon.
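For context on the bug: Hadoop's path canonicalization can append the default NameNode port to a URI the user wrote without one. That rewrite changes the URI's authority (host:port) but leaves its host untouched, which is why this patch keys the proxy-provider lookup off getHost() instead of the authority string. A minimal standalone sketch of the underlying java.net.URI behavior (the wrapper class is illustrative, and 8020 is the usual NameNode default port):

    import java.net.URI;

    public class AuthorityVsHost {
      public static void main(String[] args) {
        // The logical (HA) URI as a user would configure it.
        URI logical = URI.create("hdfs://ha-nn-uri/foo");
        // The same URI after canonicalization appends the default port.
        URI canonical = URI.create("hdfs://ha-nn-uri:8020/foo");

        // The authority changes once a port is present...
        System.out.println(logical.getAuthority());   // ha-nn-uri
        System.out.println(canonical.getAuthority()); // ha-nn-uri:8020

        // ...but the host is stable across both forms, so a config key
        // suffixed with the host resolves the same provider either way.
        System.out.println(logical.getHost());   // ha-nn-uri
        System.out.println(canonical.getHost()); // ha-nn-uri
      }
    }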
Modified:
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt?rev=1214579&r1=1214578&r2=1214579&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt Thu Dec 15 00:42:50 2011
@@ -53,3 +53,5 @@ HDFS-2634. Standby needs to ingest lates
 HDFS-2671. NN should throw StandbyException in response to RPCs in STANDBY state (todd)
 
 HDFS-2680. DFSClient should construct failover proxy with exponential backoff (todd)
+
+HDFS-2683. Authority-based lookup of proxy provider fails if path becomes canonicalized (todd)

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1214579&r1=1214578&r2=1214579&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Thu Dec 15 00:42:50 2011
@@ -313,7 +313,8 @@ public class DFSClient implements java.i
 
     this.socketCache = new SocketCache(dfsClientConf.socketCacheCapacity);
 
-    Class<?> failoverProxyProviderClass = getFailoverProxyProviderClass(authority, conf);
+    Class<?> failoverProxyProviderClass = getFailoverProxyProviderClass(
+        nameNodeUri, conf);
 
     if (nameNodeUri != null && failoverProxyProviderClass != null) {
       FailoverProxyProvider failoverProxyProvider = (FailoverProxyProvider)
@@ -353,15 +354,32 @@ public class DFSClient implements java.i
     }
   }
 
-  private Class<?> getFailoverProxyProviderClass(String authority, Configuration conf)
+  private Class<?> getFailoverProxyProviderClass(URI nameNodeUri, Configuration conf)
       throws IOException {
-    String configKey = DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + authority;
+    if (nameNodeUri == null) {
+      return null;
+    }
+    String host = nameNodeUri.getHost();
+
+    String configKey = DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + host;
     try {
-      return conf.getClass(configKey, null);
+      Class<?> ret = conf.getClass(configKey, null);
+      if (ret != null) {
+        // If we found a proxy provider, then this URI should be a logical NN.
+        // Given that, it shouldn't have a non-default port number.
+        int port = nameNodeUri.getPort();
+        if (port > 0 && port != NameNode.DEFAULT_PORT) {
+          throw new IOException(
+              "Port " + port + " specified in URI " + nameNodeUri +
+              " but host '" + host + "' is a logical (HA) namenode" +
+              " and does not use port information.");
+        }
+      }
+      return ret;
     } catch (RuntimeException e) {
       if (e.getCause() instanceof ClassNotFoundException) {
         throw new IOException("Could not load failover proxy provider class "
-          + conf.get(configKey) + " which is configured for authority " + authority,
+          + conf.get(configKey) + " which is configured for authority " + nameNodeUri,
           e);
       } else {
         throw e;

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java?rev=1214579&r1=1214578&r2=1214579&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java Thu Dec 15 00:42:50 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.junit.Assert.*;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -31,7 +32,9 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -43,6 +46,7 @@ public class TestDFSClientFailover {
 
   private Configuration conf = new Configuration();
   private MiniDFSCluster cluster;
+  private static final String LOGICAL_HOSTNAME = "ha-nn-uri";
 
   @Before
   public void setUpCluster() throws IOException {
@@ -65,10 +69,6 @@ public class TestDFSClientFailover {
   public void testDfsClientFailover() throws IOException, URISyntaxException {
     InetSocketAddress nnAddr1 = cluster.getNameNode(0).getNameNodeAddress();
     InetSocketAddress nnAddr2 = cluster.getNameNode(1).getNameNodeAddress();
-    String nameServiceId1 = DFSUtil.getNameServiceIdFromAddress(conf, nnAddr1,
-        DFS_NAMENODE_RPC_ADDRESS_KEY);
-    String nameServiceId2 = DFSUtil.getNameServiceIdFromAddress(conf, nnAddr2,
-        DFS_NAMENODE_RPC_ADDRESS_KEY);
 
     ClientProtocol nn1 = DFSUtil.createNamenode(nnAddr1, conf);
     ClientProtocol nn2 = DFSUtil.createNamenode(nnAddr2, conf);
@@ -89,8 +89,33 @@ public class TestDFSClientFailover {
     cluster.getNameNode(0).stop();
     AppendTestUtil.check(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY);
 
+    // Check that it functions even if the URL becomes canonicalized
+    // to include a port number.
+    Path withPort = new Path("hdfs://" + LOGICAL_HOSTNAME + ":" +
+        NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
+    FileSystem fs2 = withPort.getFileSystem(fs.getConf());
+    assertTrue(fs2.exists(withPort));
+
     fs.close();
   }
+
+  /**
+   * Regression test for HDFS-2683.
+   */
+  @Test
+  public void testLogicalUriShouldNotHavePorts() {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + ".foo",
+        ConfiguredFailoverProxyProvider.class.getName());
+    Path p = new Path("hdfs://foo:12345/");
+    try {
+      p.getFileSystem(conf).exists(p);
+      fail("Did not fail with fake FS");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "does not use port information", ioe);
+    }
+  }

   public static FileSystem configureFailoverFs(MiniDFSCluster cluster, Configuration conf)
       throws IOException, URISyntaxException {
@@ -99,7 +124,6 @@ public class TestDFSClientFailover {

     String nsId = "nameserviceId1";

-    final String logicalNameNodeId = "ha-nn-uri";
     String nameNodeId1 = "nn1";
     String nameNodeId2 = "nn2";

@@ -114,10 +138,10 @@ public class TestDFSClientFailover {
     conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nsId);
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY, nsId),
         nameNodeId1 + "," + nameNodeId2);
-    conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalNameNodeId,
+    conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + LOGICAL_HOSTNAME,
         ConfiguredFailoverProxyProvider.class.getName());

-    FileSystem fs = FileSystem.get(new URI("hdfs://" + logicalNameNodeId), conf);
+    FileSystem fs = FileSystem.get(new URI("hdfs://" + LOGICAL_HOSTNAME), conf);
     return fs;
   }
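As a usage note, the new guard is visible to any client application, not just this test. Here is a standalone restatement of testLogicalUriShouldNotHavePorts() above; the wrapper class and main() are illustrative, and no running cluster is needed because the port check fires before any RPC is attempted:

    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX;

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;

    public class LogicalUriPortCheck {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Map the logical host "foo" to a failover proxy provider,
        // marking "foo" as a logical (HA) namenode.
        conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + ".foo",
            ConfiguredFailoverProxyProvider.class.getName());

        // A non-default port on a logical URI is now rejected up front.
        Path p = new Path("hdfs://foo:12345/");
        try {
          p.getFileSystem(conf).exists(p);
          System.out.println("unexpected: the port was accepted");
        } catch (IOException ioe) {
          // Expected after this patch: "Port 12345 specified in URI ...
          // is a logical (HA) namenode and does not use port information."
          System.out.println("rejected as expected: " + ioe.getMessage());
        }
      }
    }

Failing fast here is the point of the patch: without it, "foo:12345" would miss the host-keyed provider config and be treated as an ordinary non-HA namenode address.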