Revert "HDFS-8855. Webhdfs client leaks active NameNode connections. 
Contributed by Xiaobing Zhou."

This reverts commit 84cbd72afda6344e220526fac5c560f00f84e374.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88beb46c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88beb46c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88beb46c

Branch: refs/heads/HDFS-7240
Commit: 88beb46cf6e6fd3e51f73a411a2750de7595e326
Parents: 3fb1ece
Author: Haohui Mai <whe...@apache.org>
Authored: Wed Nov 4 10:21:13 2015 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Wed Nov 4 10:21:13 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/security/token/Token.java |  11 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   4 -
 .../web/webhdfs/DataNodeUGIProvider.java        | 106 ++-------
 .../datanode/web/webhdfs/WebHdfsHandler.java    |   2 +-
 .../src/main/resources/hdfs-default.xml         |   8 -
 .../web/webhdfs/TestDataNodeUGIProvider.java    | 231 -------------------
 7 files changed, 19 insertions(+), 346 deletions(-)
----------------------------------------------------------------------
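For context, the reverted change had added a static, expire-after-access Guava cache of UserGroupInformation objects in DataNodeUGIProvider, keyed by delegation token or remote user and controlled by dfs.webhdfs.ugi.expire.after.access. Below is a minimal, self-contained sketch of that caching pattern only; the class, key format, and String value type are illustrative stand-ins, not the actual Hadoop code.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch of the expire-after-access cache pattern removed by
// this revert; "UgiCacheSketch" is not a real Hadoop class.
public class UgiCacheSketch {
  // Entries expire after 10 minutes without access, mirroring the removed
  // dfs.webhdfs.ugi.expire.after.access default of 600000 ms.
  private static final Cache<String, String> CACHE = CacheBuilder.newBuilder()
      .expireAfterAccess(10 * 60 * 1000, TimeUnit.MILLISECONDS)
      .build();

  static String lookup(final String user) throws ExecutionException {
    // get() computes and caches the value on a miss, so repeated requests
    // with the same key reuse one cached object instead of building a new one.
    return CACHE.get("{" + user + "}", new Callable<String>() {
      @Override
      public String call() {
        return "ugi-for-" + user; // stand-in for creating a real UGI
      }
    });
  }

  public static void main(String[] args) throws ExecutionException {
    System.out.println(lookup("hdfs"));
    System.out.println(lookup("hdfs")); // served from the cache
  }
}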


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index f189a96..2420155 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.security.token;
 
 import com.google.common.collect.Maps;
-import com.google.common.primitives.Bytes;
-
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,11 +29,9 @@ import org.apache.hadoop.io.*;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import java.io.*;
-import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Map;
 import java.util.ServiceLoader;
-import java.util.UUID;
 
 /**
  * The client-side form of the token.
@@ -341,12 +337,7 @@ public class Token<T extends TokenIdentifier> implements Writable {
     identifierToString(buffer);
     return buffer.toString();
   }
-
-  public String buildCacheKey() {
-    return UUID.nameUUIDFromBytes(
-        Bytes.concat(kind.getBytes(), identifier, password)).toString();
-  }
-
+  
   private static ServiceLoader<TokenRenewer> renewers =
       ServiceLoader.load(TokenRenewer.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 500dc92..f2d8296 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2152,9 +2152,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-9160. [OIV-Doc] : Missing details of 'delimited' for processor options
     (nijel via vinayakumarb)
 
-    HDFS-8855. Webhdfs client leaks active NameNode connections.
-    (Xiaobing Zhou via jitendra) 
-
     HDFS-9235. hdfs-native-client build getting errors when built with cmake
     2.6. (Eric Payne via wheat9)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 424f963..c14ce20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -70,10 +70,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_WEBHDFS_NETTY_HIGH_WATERMARK =
       "dfs.webhdfs.netty.high.watermark";
   public static final int  DFS_WEBHDFS_NETTY_HIGH_WATERMARK_DEFAULT = 65535;
-  public static final String  DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_KEY =
-      "dfs.webhdfs.ugi.expire.after.access";
-  public static final int     DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_DEFAULT =
-      10*60*1000; //10 minutes
 
   // HA related configuration
   public static final String  DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY = "dfs.datanode.restart.replica.expiration";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java
index 064def8..ea1c29f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java
@@ -13,26 +13,14 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.io.WritableComparator;
-import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
 
 /**
  * Create UGI from the request for the WebHDFS requests for the DNs. Note that
@@ -41,75 +29,34 @@ import java.util.concurrent.TimeUnit;
  */
 class DataNodeUGIProvider {
   private final ParameterParser params;
-  private static Cache<String, UserGroupInformation> ugiCache;
-  public static final Log LOG = LogFactory.getLog(Client.class);
 
-  DataNodeUGIProvider(ParameterParser params, Configuration conf) {
+  DataNodeUGIProvider(ParameterParser params) {
     this.params = params;
-    if (ugiCache == null) {
-      synchronized (DataNodeUGIProvider.class) {
-        if (ugiCache == null) {
-          ugiCache = CacheBuilder
-              .newBuilder()
-              .expireAfterAccess(
-                  conf.getInt(
-                      DFSConfigKeys.DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_KEY,
-                      DFSConfigKeys.DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_DEFAULT),
-                  TimeUnit.MILLISECONDS).build();
-        }
-      }
-    }
   }
 
   UserGroupInformation ugi() throws IOException {
-    UserGroupInformation ugi;
-
-    try {
-      if (UserGroupInformation.isSecurityEnabled()) {
-        final Token<DelegationTokenIdentifier> token = params.delegationToken();
+    if (UserGroupInformation.isSecurityEnabled()) {
+      return tokenUGI();
+    }
 
-        ugi = ugiCache.get(buildTokenCacheKey(token),
-            new Callable<UserGroupInformation>() {
-              @Override
-              public UserGroupInformation call() throws Exception {
-                return tokenUGI(token);
-              }
-            });
-      } else {
-        final String usernameFromQuery = params.userName();
-        final String doAsUserFromQuery = params.doAsUser();
-        final String remoteUser = usernameFromQuery == null ? JspHelper
-            .getDefaultWebUserName(params.conf()) // not specified in request
-            : usernameFromQuery;
+    final String usernameFromQuery = params.userName();
+    final String doAsUserFromQuery = params.doAsUser();
+    final String remoteUser = usernameFromQuery == null
+        ? JspHelper.getDefaultWebUserName(params.conf()) // not specified in
+        // request
+        : usernameFromQuery;
 
-        ugi = ugiCache.get(
-            buildNonTokenCacheKey(doAsUserFromQuery, remoteUser),
-            new Callable<UserGroupInformation>() {
-              @Override
-              public UserGroupInformation call() throws Exception {
-                return nonTokenUGI(usernameFromQuery, doAsUserFromQuery,
-                    remoteUser);
-              }
-            });
-      }
-    } catch (ExecutionException e) {
-      Throwable cause = e.getCause();
-      if (cause instanceof IOException) {
-        throw (IOException) cause;
-      } else {
-        throw new IOException(cause);
-      }
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(remoteUser);
+    JspHelper.checkUsername(ugi.getShortUserName(), usernameFromQuery);
+    if (doAsUserFromQuery != null) {
+      // create and attempt to authorize a proxy user
+      ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi);
     }
-
     return ugi;
   }
 
-  private String buildTokenCacheKey(Token<DelegationTokenIdentifier> token) {
-    return token.buildCacheKey();
-  }
-
-  private UserGroupInformation tokenUGI(Token<DelegationTokenIdentifier> token)
-      throws IOException {
+  private UserGroupInformation tokenUGI() throws IOException {
+    Token<DelegationTokenIdentifier> token = params.delegationToken();
     ByteArrayInputStream buf =
       new ByteArrayInputStream(token.getIdentifier());
     DataInputStream in = new DataInputStream(buf);
@@ -120,23 +67,4 @@ class DataNodeUGIProvider {
     return ugi;
   }
 
-  private String buildNonTokenCacheKey(String doAsUserFromQuery,
-      String remoteUser) throws IOException {
-    String key = doAsUserFromQuery == null ? String.format("{%s}", remoteUser)
-        : String.format("{%s}:{%s}", remoteUser, doAsUserFromQuery);
-    return key;
-  }
-
-  private UserGroupInformation nonTokenUGI(String usernameFromQuery,
-      String doAsUserFromQuery, String remoteUser) throws IOException {
-
-    UserGroupInformation ugi = UserGroupInformation
-        .createRemoteUser(remoteUser);
-    JspHelper.checkUsername(ugi.getShortUserName(), usernameFromQuery);
-    if (doAsUserFromQuery != null) {
-      // create and attempt to authorize a proxy user
-      ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi);
-    }
-    return ugi;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index f8ddce1..dffe34d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -107,7 +107,7 @@ public class WebHdfsHandler extends SimpleChannelInboundHandler<HttpRequest> {
     Preconditions.checkArgument(req.uri().startsWith(WEBHDFS_PREFIX));
     QueryStringDecoder queryString = new QueryStringDecoder(req.uri());
     params = new ParameterParser(queryString, conf);
-    DataNodeUGIProvider ugiProvider = new DataNodeUGIProvider(params, conf);
+    DataNodeUGIProvider ugiProvider = new DataNodeUGIProvider(params);
     ugi = ugiProvider.ugi();
     path = params.path();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 81c510c..df48015 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2473,14 +2473,6 @@
 </property>
 
 <property>
-    <name>dfs.webhdfs.ugi.expire.after.access</name>
-    <value>600000</value>
-    <description>How long in milliseconds after the last access
-      the cached UGI will expire. With 0, never expire.
-    </description>
-</property>
-
-<property>
   <name>dfs.namenode.blocks.per.postponedblocks.rescan</name>
   <value>10000</value>
   <description>Number of blocks to rescan for each iteration of

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java
deleted file mode 100644
index b87371e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
-
-import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
-import static org.mockito.Mockito.mock;
-import io.netty.handler.codec.http.QueryStringDecoder;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.web.WebHdfsConstants;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
-import org.apache.hadoop.hdfs.web.resources.DelegationParam;
-import org.apache.hadoop.hdfs.web.resources.LengthParam;
-import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
-import org.apache.hadoop.hdfs.web.resources.OffsetParam;
-import org.apache.hadoop.hdfs.web.resources.Param;
-import org.apache.hadoop.hdfs.web.resources.UserParam;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableComparator;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Lists;
-
-public class TestDataNodeUGIProvider {
-  private final URI uri = URI.create(WebHdfsConstants.WEBHDFS_SCHEME + "://"
-      + "127.0.0.1:0");
-  private final String PATH = "/foo";
-  private final int OFFSET = 42;
-  private final int LENGTH = 512;
-  private final int EXPIRE_AFTER_ACCESS = 5*1000;
-
-  @Test
-  public void testUGICacheSecure() throws Exception {
-
-    final Configuration conf = WebHdfsTestUtil.createConf();
-    conf.setInt(DFSConfigKeys.DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_KEY,
-        EXPIRE_AFTER_ACCESS);
-
-    // fake turning on security so api thinks it should use tokens
-    SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
-    UserGroupInformation.setConfiguration(conf);
-
-    UserGroupInformation ugi = UserGroupInformation
-        .createRemoteUser("test-user");
-    ugi.setAuthenticationMethod(KERBEROS);
-    ugi = UserGroupInformation.createProxyUser("test-proxy-user", ugi);
-    UserGroupInformation.setLoginUser(ugi);
-
-    List<Token<DelegationTokenIdentifier>> tokens = Lists.newArrayList();
-    getWebHdfsFileSystem(ugi, conf, tokens);
-
-    String uri1 = WebHdfsFileSystem.PATH_PREFIX
-        + PATH
-        + "?op=OPEN"
-        + Param.toSortedString("&", new NamenodeAddressParam("127.0.0.1:1010"),
-            new OffsetParam((long) OFFSET), new LengthParam((long) LENGTH),
-            new DelegationParam(tokens.get(0).encodeToUrlString()));
-
-    String uri2 = WebHdfsFileSystem.PATH_PREFIX
-        + PATH
-        + "?op=OPEN"
-        + Param.toSortedString("&", new NamenodeAddressParam("127.0.0.1:1010"),
-            new OffsetParam((long) OFFSET), new LengthParam((long) LENGTH),
-            new DelegationParam(tokens.get(1).encodeToUrlString()));
-
-    DataNodeUGIProvider ugiProvider1 = new DataNodeUGIProvider(
-        new ParameterParser(new QueryStringDecoder(URI.create(uri1)), conf),
-        conf);
-    UserGroupInformation ugi11 = ugiProvider1.ugi();
-    UserGroupInformation ugi12 = ugiProvider1.ugi();
-
-    Assert.assertEquals(
-        "With UGI cache, two UGIs returned by the same token should be same",
-        ugi11, ugi12);
-
-    DataNodeUGIProvider ugiProvider2 = new DataNodeUGIProvider(
-        new ParameterParser(new QueryStringDecoder(URI.create(uri2)), conf),
-        conf);
-    UserGroupInformation url21 = ugiProvider2.ugi();
-    UserGroupInformation url22 = ugiProvider2.ugi();
-
-    Assert.assertEquals(
-        "With UGI cache, two UGIs returned by the same token should be same",
-        url21, url22);
-
-    Assert.assertNotEquals(
-        "With UGI cache, two UGIs for the different token should not be same",
-        ugi11, url22);
-
-    Thread.sleep(EXPIRE_AFTER_ACCESS);
-    ugi12 = ugiProvider1.ugi();
-    url22 = ugiProvider2.ugi();
-
-    Assert
-        .assertNotEquals(
-            "With cache eviction, two UGIs returned by the same token should not be same",
-            ugi11, ugi12);
-
-    Assert
-        .assertNotEquals(
-            "With cache eviction, two UGIs returned by the same token should not be same",
-            url21, url22);
-
-    Assert.assertNotEquals(
-        "With UGI cache, two UGIs for the different token should not be same",
-        ugi11, url22);
-  }
-
-  @Test
-  public void testUGICacheInSecure() throws Exception {
-
-    final Configuration conf = WebHdfsTestUtil.createConf();
-    conf.setInt(DFSConfigKeys.DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_KEY,
-        EXPIRE_AFTER_ACCESS);
-
-    String uri1 = WebHdfsFileSystem.PATH_PREFIX
-        + PATH
-        + "?op=OPEN"
-        + Param.toSortedString("&", new OffsetParam((long) OFFSET),
-            new LengthParam((long) LENGTH), new UserParam("root"));
-
-    String uri2 = WebHdfsFileSystem.PATH_PREFIX
-        + PATH
-        + "?op=OPEN"
-        + Param.toSortedString("&", new OffsetParam((long) OFFSET),
-            new LengthParam((long) LENGTH), new UserParam("hdfs"));
-
-    DataNodeUGIProvider ugiProvider1 = new DataNodeUGIProvider(
-        new ParameterParser(new QueryStringDecoder(URI.create(uri1)), conf),
-        conf);
-    UserGroupInformation ugi11 = ugiProvider1.ugi();
-    UserGroupInformation ugi12 = ugiProvider1.ugi();
-
-    Assert.assertEquals(
-        "With UGI cache, two UGIs for the same user should be same", ugi11,
-        ugi12);
-
-    DataNodeUGIProvider ugiProvider2 = new DataNodeUGIProvider(
-        new ParameterParser(new QueryStringDecoder(URI.create(uri2)), conf),
-        conf);
-    UserGroupInformation url21 = ugiProvider2.ugi();
-    UserGroupInformation url22 = ugiProvider2.ugi();
-
-    Assert.assertEquals(
-        "With UGI cache, two UGIs for the same user should be same", url21,
-        url22);
-
-    Assert.assertNotEquals(
-        "With UGI cache, two UGIs for the different user should not be same",
-        ugi11, url22);
-
-    Thread.sleep(EXPIRE_AFTER_ACCESS);
-    ugi12 = ugiProvider1.ugi();
-    url22 = ugiProvider2.ugi();
-
-    Assert
-        .assertNotEquals(
-            "With cache eviction, two UGIs returned by the same user should not be same",
-            ugi11, ugi12);
-
-    Assert
-        .assertNotEquals(
-            "With cache eviction, two UGIs returned by the same user should not be same",
-            url21, url22);
-
-    Assert.assertNotEquals(
-        "With UGI cache, two UGIs for the different user should not be same",
-        ugi11, url22);
-  }
-
-  private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
-      Configuration conf, List<Token<DelegationTokenIdentifier>> tokens)
-      throws IOException {
-    if (UserGroupInformation.isSecurityEnabled()) {
-      DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
-          ugi.getUserName()), null, null);
-      FSNamesystem namesystem = mock(FSNamesystem.class);
-      DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(
-          86400000, 86400000, 86400000, 86400000, namesystem);
-      dtSecretManager.startThreads();
-      Token<DelegationTokenIdentifier> token1 = new Token<DelegationTokenIdentifier>(
-          dtId, dtSecretManager);
-      Token<DelegationTokenIdentifier> token2 = new Token<DelegationTokenIdentifier>(
-          dtId, dtSecretManager);
-      SecurityUtil.setTokenService(token1,
-          NetUtils.createSocketAddr(uri.getAuthority()));
-      SecurityUtil.setTokenService(token2,
-          NetUtils.createSocketAddr(uri.getAuthority()));
-      token1.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
-      token2.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
-
-      tokens.add(token1);
-      tokens.add(token2);
-
-      ugi.addToken(token1);
-      ugi.addToken(token2);
-    }
-    return (WebHdfsFileSystem) FileSystem.get(uri, conf);
-  }
-}
