goiri commented on a change in pull request #4081:
URL: https://github.com/apache/hadoop/pull/4081#discussion_r830172952



##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
##########
@@ -59,5 +74,67 @@ public void testNamenodeRpcBindAny() throws IOException {
       conf.unset(DFS_NAMENODE_RPC_BIND_HOST_KEY);
     }
   }
+
+  /**
+   * A test to make sure that if an authorized user adds "clientIp:" to their
+   * caller context, it will be used to make locality decisions on the NN.
+   */
+  @Test
+  public void testNamenodeRpcClientIpProxy()
+      throws InterruptedException, IOException {
+    Configuration conf = new HdfsConfiguration();
+
+    conf.set(DFS_NAMENODE_IP_PROXY_USERS, "fake_joe");
+    // Make 3 nodes & racks so that we have a decent shot of detecting when
+    // our change overrides the random choice of datanode.
+    final String[] racks = new String[]{"/rack1", "/rack2", "/rack3"};
+    final String[] hosts = new String[]{"node1", "node2", "node3"};
+    MiniDFSCluster cluster = null;
+    final CallerContext original = CallerContext.getCurrent();
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .racks(racks).hosts(hosts).numDataNodes(hosts.length)
+          .build();
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      // Write a sample file
+      final Path fooName = fs.makeQualified(new Path("/foo"));
+      FSDataOutputStream stream = fs.create(fooName);
+      stream.write("Hello world!\n".getBytes(StandardCharsets.UTF_8));
+      stream.close();
+      // Set the caller context to carry the overridden client IP address
+      CallerContext.setCurrent(
+          new CallerContext.Builder("test", conf)
+              .append(CallerContext.CLIENT_IP_STR, hosts[0])
+              .build());
+      // Run as fake joe to authorize the test
+      UserGroupInformation
+          .createUserForTesting("fake_joe", new String[]{"fake_group"})
+          .doAs(new PrivilegedExceptionAction<Object>() {
+            @Override
+            public Object run() throws Exception {
+              // Create a new file system as the joe user
+              DistributedFileSystem joe_fs =

Review comment:
       Java naming convention: `joe_fs` should be camelCase, e.g. `joeFs`.
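
       For context, a minimal sketch of how the truncated doAs body might
       continue once the variable is renamed. This is an illustration, not
       the PR's actual code: the block-location assertion is an assumption
       based on the test's stated goal (the NameNode should favor the
       datanode matching the spoofed clientIp), and it presumes imports for
       FileSystem, FileStatus, and BlockLocation plus effectively final
       captures of fs, conf, fooName, and hosts:

           // Hypothetical continuation of run(), using only public
           // Hadoop APIs (FileSystem.get, getFileBlockLocations).
           DistributedFileSystem joeFs = (DistributedFileSystem)
               FileSystem.get(fs.getUri(), conf);
           FileStatus status = joeFs.getFileStatus(fooName);
           BlockLocation[] locations =
               joeFs.getFileBlockLocations(status, 0, status.getLen());
           // With clientIp set to hosts[0] in the caller context, the
           // NameNode should sort that datanode's replica first.
           assertEquals(hosts[0], locations[0].getHosts()[0]);
           return null;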

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
##########
@@ -987,6 +987,8 @@
       "dfs.namenode.lifeline.handler.count";
   public static final String  DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY = "dfs.namenode.service.handler.count";
   public static final int     DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT = 10;
+  // List of users that can override their client IP
+  public static final String  DFS_NAMENODE_IP_PROXY_USERS = "dfs.namenode.ip-proxy-users";

Review comment:
       TestHdfsConfigFields is not happy; the new key is missing from hdfs-default.xml.
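
       For reference, TestHdfsConfigFields cross-checks the constants in
       DFSConfigKeys against hdfs-default.xml, so the new key needs an
       entry there. A sketch of what that entry could look like; the key
       name comes from this diff, but the description wording is only a
       suggestion:

           <!-- Hypothetical hdfs-default.xml entry; the description
                text is illustrative, not from the PR. -->
           <property>
             <name>dfs.namenode.ip-proxy-users</name>
             <value></value>
             <description>
               A comma-separated list of user names that the NameNode
               allows to override the client IP address via the
               "clientIp:" field of the RPC caller context.
             </description>
           </property>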

##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
##########
@@ -24,14 +24,29 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_IP_PROXY_USERS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
 import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.security.PrivilegedExceptionAction;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;

Review comment:
       A bunch of these new imports seem unused; please drop the ones the test does not actually reference.
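
       For reference, a guess (based only on the hunks quoted in this
       review, so not authoritative) at the imports the new test actually
       exercises; anything outside this set, such as ByteBuffer, looks
       like a removal candidate:

           // Imports plausibly required by testNamenodeRpcClientIpProxy,
           // judging solely from the code visible in this review.
           import java.nio.charset.StandardCharsets;
           import java.security.PrivilegedExceptionAction;
           import org.apache.hadoop.fs.FSDataOutputStream;
           import org.apache.hadoop.fs.Path;
           import org.apache.hadoop.hdfs.DistributedFileSystem;
           import org.apache.hadoop.hdfs.HdfsConfiguration;
           import org.apache.hadoop.hdfs.MiniDFSCluster;
           import org.apache.hadoop.ipc.CallerContext;
           import org.apache.hadoop.security.UserGroupInformation;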




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
