Author: wheat9
Date: Wed Apr  2 17:28:08 2014
New Revision: 1584100

URL: http://svn.apache.org/r1584100
Log:
HDFS-5570. Deprecate hftp / hsftp and replace them with webhdfs / swebhdfs. 
Contributed by Haohui Mai.
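
For client code, the replacement is purely a URI scheme change; the FileSystem
API itself is unchanged. A minimal sketch of the migration, assuming a NameNode
whose HTTP address is namenode:50070 (the host, port, and class name below are
placeholders, not part of this commit):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsMigrationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Before: FileSystem.get(URI.create("hftp://namenode:50070"), conf);
        // After: same API, reached over the webhdfs (or swebhdfs for HTTPS) scheme.
        FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:50070"), conf);
        System.out.println(fs.exists(new Path("/")));
        fs.close();
      }
    }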

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Apr  2 17:28:08 2014
@@ -9,6 +9,9 @@ Trunk (Unreleased)
     HDFS-5079. Cleaning up NNHAStatusHeartbeat.State from
     DatanodeProtocolProtos. (Tao Luo via shv)
 
+    HDFS-5570. Deprecate hftp / hsftp and replace them with webhdfs / swebhdfs.
+    (wheat9)
+
   NEW FEATURES
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java Wed Apr  2 17:28:08 2014
@@ -41,7 +41,7 @@ public class DelegationTokenSelector
    * Select the delegation token for hdfs.  The port will be rewritten to
    * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port. 
    * This method should only be called by non-hdfs filesystems that do not
-   * use the rpc port to acquire tokens.  Ex. webhdfs, hftp 
+   * use the rpc port to acquire tokens.  Ex. webhdfs
    * @param nnUri of the remote namenode
    * @param tokens as a collection
    * @param conf hadoop configuration

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Apr  2 17:28:08 2014
@@ -58,8 +58,6 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
-import org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets;
-import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -361,10 +359,6 @@ public class DataNode extends Configured
 
     this.infoServer = builder.build();
 
-    this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
-    this.infoServer.addInternalServlet(null, "/getFileChecksum/*",
-        FileChecksumServlets.GetServlet.class);
-    
     this.infoServer.setAttribute("datanode", this);
     this.infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
     this.infoServer.addServlet(null, "/blockScannerReport", 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Apr  2 17:28:08 2014
@@ -781,7 +781,7 @@ public class NameNode implements NameNod
 
   /**
    * @return NameNode HTTP address, used by the Web UI, image transfer,
-   *    and HTTP-based file system clients like Hftp and WebHDFS
+   *    and HTTP-based file system clients like WebHDFS
    */
   public InetSocketAddress getHttpAddress() {
     return httpServer.getHttpAddress();
@@ -789,7 +789,7 @@ public class NameNode implements NameNod
 
   /**
    * @return NameNode HTTPS address, used by the Web UI, image transfer,
-   *    and HTTP-based file system clients like Hftp and WebHDFS
+   *    and HTTP-based file system clients like WebHDFS
    */
   public InetSocketAddress getHttpsAddress() {
     return httpServer.getHttpsAddress();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java Wed Apr  2 17:28:08 2014
@@ -229,27 +229,10 @@ public class NameNodeHttpServer {
   private static void setupServlets(HttpServer2 httpServer, Configuration conf) {
     httpServer.addInternalServlet("startupProgress",
         StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class);
-    httpServer.addInternalServlet("getDelegationToken",
-        GetDelegationTokenServlet.PATH_SPEC, 
-        GetDelegationTokenServlet.class, true);
-    httpServer.addInternalServlet("renewDelegationToken", 
-        RenewDelegationTokenServlet.PATH_SPEC, 
-        RenewDelegationTokenServlet.class, true);
-    httpServer.addInternalServlet("cancelDelegationToken", 
-        CancelDelegationTokenServlet.PATH_SPEC, 
-        CancelDelegationTokenServlet.class, true);
     httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class,
         true);
     httpServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
         ImageServlet.class, true);
-    httpServer.addInternalServlet("listPaths", "/listPaths/*",
-        ListPathsServlet.class, false);
-    httpServer.addInternalServlet("data", "/data/*",
-        FileDataServlet.class, false);
-    httpServer.addInternalServlet("checksum", "/fileChecksum/*",
-        FileChecksumServlets.RedirectServlet.class, false);
-    httpServer.addInternalServlet("contentSummary", "/contentSummary/*",
-        ContentSummaryServlet.class, false);
   }
 
   static FSImage getFsImageFromContext(ServletContext context) {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Wed Apr  2 17:28:08 2014
@@ -17,17 +17,11 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
-import java.io.BufferedReader;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.io.PrintStream;
-import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.Date;
@@ -43,23 +37,16 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
-import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
-import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
-import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
-import org.apache.hadoop.hdfs.web.HsftpFileSystem;
-import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.net.NetUtils;
+
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.GenericOptionsParser;
 
-import com.google.common.base.Charsets;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Fetch a DelegationToken from the current Namenode and store it in the
@@ -67,61 +54,38 @@ import com.google.common.base.Charsets;
  */
 @InterfaceAudience.Private
 public class DelegationTokenFetcher {
-  private static final Log LOG = 
-    LogFactory.getLog(DelegationTokenFetcher.class);
   private static final String WEBSERVICE = "webservice";
-  private static final String RENEWER = "renewer";
   private static final String CANCEL = "cancel";
-  private static final String RENEW = "renew";
-  private static final String PRINT = "print";
   private static final String HELP = "help";
   private static final String HELP_SHORT = "h";
+  private static final Log LOG = LogFactory
+      .getLog(DelegationTokenFetcher.class);
+  private static final String PRINT = "print";
+  private static final String RENEW = "renew";
+  private static final String RENEWER = "renewer";
 
-  private static void printUsage(PrintStream err) {
-    err.println("fetchdt retrieves delegation tokens from the NameNode");
-    err.println();
-    err.println("fetchdt <opts> <token file>");
-    err.println("Options:");
-    err.println("  --webservice <url>  Url to contact NN on");
-    err.println("  --renewer <name>    Name of the delegation token renewer");
-    err.println("  --cancel            Cancel the delegation token");
-    err.println("  --renew             Renew the delegation token.  Delegation 
" 
-               + "token must have been fetched using the --renewer <name> 
option.");
-    err.println("  --print             Print the delegation token");
-    err.println();
-    GenericOptionsParser.printGenericCommandUsage(err);
-    ExitUtil.terminate(1);    
-  }
-
-  private static Collection<Token<?>> readTokens(Path file, Configuration conf)
-      throws IOException {
-    Credentials creds = Credentials.readTokenStorageFile(file, conf);
-    return creds.getAllTokens();
-  }
-    
   /**
    * Command-line interface
    */
   public static void main(final String[] args) throws Exception {
     final Configuration conf = new HdfsConfiguration();
     Options fetcherOptions = new Options();
-    fetcherOptions.addOption(WEBSERVICE, true,
-        "HTTP url to reach the NameNode at");
-    fetcherOptions.addOption(RENEWER, true,
-        "Name of the delegation token renewer");
-    fetcherOptions.addOption(CANCEL, false, "cancel the token");
-    fetcherOptions.addOption(RENEW, false, "renew the token");
-    fetcherOptions.addOption(PRINT, false, "print the token");
-    fetcherOptions.addOption(HELP_SHORT, HELP, false, "print out help information");
+    fetcherOptions
+      .addOption(WEBSERVICE, true, "HTTP url to reach the NameNode at")
+      .addOption(RENEWER, true, "Name of the delegation token renewer")
+      .addOption(CANCEL, false, "cancel the token")
+      .addOption(RENEW, false, "renew the token")
+      .addOption(PRINT, false, "print the token")
+      .addOption(HELP_SHORT, HELP, false, "print out help information");
+
     GenericOptionsParser parser = new GenericOptionsParser(conf,
-        fetcherOptions, args);
+            fetcherOptions, args);
     CommandLine cmd = parser.getCommandLine();
-    
-    // get options
+
     final String webUrl = cmd.hasOption(WEBSERVICE) ? cmd
-        .getOptionValue(WEBSERVICE) : null;
-    final String renewer = cmd.hasOption(RENEWER) ? 
-        cmd.getOptionValue(RENEWER) : null;
+            .getOptionValue(WEBSERVICE) : null;
+    final String renewer = cmd.hasOption(RENEWER) ? cmd.getOptionValue
+            (RENEWER) : null;
     final boolean cancel = cmd.hasOption(CANCEL);
     final boolean renew = cmd.hasOption(RENEW);
     final boolean print = cmd.hasOption(PRINT);
@@ -133,8 +97,9 @@ public class DelegationTokenFetcher {
       printUsage(System.out);
       System.exit(0);
     }
-    if (cancel && renew || cancel && print || renew && print || cancel && renew
-        && print) {
+
+    int commandCount = (cancel ? 1 : 0) + (renew ? 1 : 0) + (print ? 1 : 0);
+    if (commandCount > 1) {
       System.err.println("ERROR: Only specify cancel, renew or print.");
       printUsage(System.err);
     }
@@ -145,248 +110,118 @@ public class DelegationTokenFetcher {
     // default to using the local file system
     FileSystem local = FileSystem.getLocal(conf);
     final Path tokenFile = new Path(local.getWorkingDirectory(), remaining[0]);
-    final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;
 
     // Login the current user
-    UserGroupInformation.getCurrentUser().doAs(
-        new PrivilegedExceptionAction<Object>() {
-          @Override
-          public Object run() throws Exception {
-
-            if (print) {
-              DelegationTokenIdentifier id = new DelegationTokenSecretManager(
-                  0, 0, 0, 0, null).createIdentifier();
-              for (Token<?> token : readTokens(tokenFile, conf)) {
-                DataInputStream in = new DataInputStream(
-                    new ByteArrayInputStream(token.getIdentifier()));
-                id.readFields(in);
-                System.out.println("Token (" + id + ") for " + 
-                                   token.getService());
-              }
-            } else if (cancel) {
-              for(Token<?> token: readTokens(tokenFile, conf)) {
-                if (token.isManaged()) {
-                  token.cancel(conf);
-                  if (LOG.isDebugEnabled()) {
-                    LOG.debug("Cancelled token for " + token.getService());
-                  }
-                }
-              }
-            } else if (renew) {
-              for (Token<?> token : readTokens(tokenFile, conf)) {
-                if (token.isManaged()) {
-                  long result = token.renew(conf);
-                  if (LOG.isDebugEnabled()) {
-                    LOG.debug("Renewed token for " + token.getService()
-                        + " until: " + new Date(result));
-                  }
-                }
-              }
-            } else {
-              // otherwise we are fetching
-              if (webUrl != null) {
-                Credentials creds = getDTfromRemote(connectionFactory, new URI(
-                    webUrl), renewer, null);
-                creds.writeTokenStorageFile(tokenFile, conf);
-                for (Token<?> token : creds.getAllTokens()) {
-                  if(LOG.isDebugEnabled()) {   
-                    LOG.debug("Fetched token via " + webUrl + " for "
-                        + token.getService() + " into " + tokenFile);
-                  }
-                }
-              } else {
-                FileSystem fs = FileSystem.get(conf);
-                Credentials cred = new Credentials();
-                Token<?> tokens[] = fs.addDelegationTokens(renewer, cred);
-                cred.writeTokenStorageFile(tokenFile, conf);
-                if(LOG.isDebugEnabled()) {
-                  for (Token<?> token : tokens) {
-                    LOG.debug("Fetched token for " + token.getService()
-                        + " into " + tokenFile);
-                  }
-                }
-              }
-            }
-            return null;
-          }
-        });
-  }
-  
-  static public Credentials getDTfromRemote(URLConnectionFactory factory,
-      URI nnUri, String renewer, String proxyUser) throws IOException {
-    StringBuilder buf = new StringBuilder(nnUri.toString())
-        .append(GetDelegationTokenServlet.PATH_SPEC);
-    String separator = "?";
-    if (renewer != null) {
-      buf.append("?").append(GetDelegationTokenServlet.RENEWER).append("=")
-          .append(renewer);
-      separator = "&";
-    }
-    if (proxyUser != null) {
-      buf.append(separator).append("doas=").append(proxyUser);
-    }
-
-    boolean isHttps = nnUri.getScheme().equals("https");
-
-    HttpURLConnection conn = null;
-    DataInputStream dis = null;
-    InetSocketAddress serviceAddr = NetUtils.createSocketAddr(nnUri
-        .getAuthority());
-
-    try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Retrieving token from: " + buf);
-      }
-
-      conn = run(factory, new URL(buf.toString()));
-      InputStream in = conn.getInputStream();
-      Credentials ts = new Credentials();
-      dis = new DataInputStream(in);
-      ts.readFields(dis);
-      for (Token<?> token : ts.getAllTokens()) {
-        token.setKind(isHttps ? HsftpFileSystem.TOKEN_KIND : HftpFileSystem.TOKEN_KIND);
-        SecurityUtil.setTokenService(token, serviceAddr);
-      }
-      return ts;
-    } catch (Exception e) {
-      throw new IOException("Unable to obtain remote token", e);
-    } finally {
-      IOUtils.cleanup(LOG, dis);
-      if (conn != null) {
-        conn.disconnect();
-      }
+    UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        if (print) {
+          printTokens(conf, tokenFile);
+        } else if (cancel) {
+          cancelTokens(conf, tokenFile);
+        } else if (renew) {
+          renewTokens(conf, tokenFile);
+        } else {
+          // otherwise we are fetching
+          FileSystem fs = getFileSystem(conf, webUrl);
+          saveDelegationToken(conf, fs, renewer, tokenFile);
+        }
+        return null;
+      }
+    });
+  }
+
+  private static FileSystem getFileSystem(Configuration conf, String url)
+          throws IOException {
+    if (url == null) {
+      return FileSystem.get(conf);
+    }
+
+    // For backward compatibility
+    URI fsUri = URI.create(
+            url.replaceFirst("^http://", WebHdfsFileSystem.SCHEME + "://")
+               .replaceFirst("^https://", SWebHdfsFileSystem.SCHEME + "://"));
+
+    return FileSystem.get(fsUri, conf);
+  }
+
+  @VisibleForTesting
+  static void cancelTokens(final Configuration conf, final Path tokenFile)
+          throws IOException, InterruptedException {
+    for (Token<?> token : readTokens(tokenFile, conf)) {
+      if (token.isManaged()) {
+        token.cancel(conf);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Cancelled token for " + token.getService());
+        }
+      }
+    }
+  }
+
+  @VisibleForTesting
+  static void renewTokens(final Configuration conf, final Path tokenFile)
+          throws IOException, InterruptedException {
+    for (Token<?> token : readTokens(tokenFile, conf)) {
+      if (token.isManaged()) {
+        long result = token.renew(conf);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Renewed token for " + token.getService() + " until: " +
+                  new Date(result));
+        }
+      }
+    }
+  }
+
+  @VisibleForTesting
+  static void saveDelegationToken(Configuration conf, FileSystem fs,
+                                  final String renewer, final Path tokenFile)
+          throws IOException {
+    Token<?> token = fs.getDelegationToken(renewer);
+
+    Credentials cred = new Credentials();
+    cred.addToken(token.getKind(), token);
+    cred.writeTokenStorageFile(tokenFile, conf);
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Fetched token " + fs.getUri() + " for " + token.getService()
+              + " into " + tokenFile);
+    }
+  }
+
+  private static void printTokens(final Configuration conf,
+                                  final Path tokenFile)
+          throws IOException {
+    DelegationTokenIdentifier id = new DelegationTokenSecretManager(0, 0, 0,
+            0, null).createIdentifier();
+    for (Token<?> token : readTokens(tokenFile, conf)) {
+      DataInputStream in = new DataInputStream(new ByteArrayInputStream(token
+              .getIdentifier()));
+      id.readFields(in);
+      System.out.println("Token (" + id + ") for " + token.getService());
     }
   }
 
-  /**
-   * Cancel a Delegation Token.
-   * @param nnAddr the NameNode's address
-   * @param tok the token to cancel
-   * @throws IOException
-   * @throws AuthenticationException
-   */
-  static public void cancelDelegationToken(URLConnectionFactory factory,
-      URI nnAddr, Token<DelegationTokenIdentifier> tok) throws IOException,
-      AuthenticationException {
-    StringBuilder buf = new StringBuilder(nnAddr.toString())
-        .append(CancelDelegationTokenServlet.PATH_SPEC).append("?")
-        .append(CancelDelegationTokenServlet.TOKEN).append("=")
-        .append(tok.encodeToUrlString());
-    HttpURLConnection conn = run(factory, new URL(buf.toString()));
-    conn.disconnect();
-  }
-
-  /**
-   * Renew a Delegation Token.
-   * @param nnAddr the NameNode's address
-   * @param tok the token to renew
-   * @return the Date that the token will expire next.
-   * @throws IOException
-   * @throws AuthenticationException
-   */
-  static public long renewDelegationToken(URLConnectionFactory factory,
-      URI nnAddr, Token<DelegationTokenIdentifier> tok) throws IOException,
-      AuthenticationException {
-    StringBuilder buf = new StringBuilder(nnAddr.toString())
-        .append(RenewDelegationTokenServlet.PATH_SPEC).append("?")
-        .append(RenewDelegationTokenServlet.TOKEN).append("=")
-        .append(tok.encodeToUrlString());
-
-    HttpURLConnection connection = null;
-    BufferedReader in = null;
-    try {
-      connection = run(factory, new URL(buf.toString()));
-      in = new BufferedReader(new InputStreamReader(
-          connection.getInputStream(), Charsets.UTF_8));
-      long result = Long.parseLong(in.readLine());
-      return result;
-    } catch (IOException ie) {
-      LOG.info("error in renew over HTTP", ie);
-      IOException e = getExceptionFromResponse(connection);
-
-      if (e != null) {
-        LOG.info("rethrowing exception from HTTP request: "
-            + e.getLocalizedMessage());
-        throw e;
-      }
-      throw ie;
-    } finally {
-      IOUtils.cleanup(LOG, in);
-      if (connection != null) {
-        connection.disconnect();
-      }
-    }
-  }
-
-  // parse the message and extract the name of the exception and the message
-  static private IOException getExceptionFromResponse(HttpURLConnection con) {
-    IOException e = null;
-    String resp;
-    if(con == null) 
-      return null;    
-    
-    try {
-      resp = con.getResponseMessage();
-    } catch (IOException ie) { return null; }
-    if(resp == null || resp.isEmpty())
-      return null;
-
-    String exceptionClass = "", exceptionMsg = "";
-    String[] rs = resp.split(";");
-    if(rs.length < 2)
-      return null;
-    exceptionClass = rs[0];
-    exceptionMsg = rs[1];
-    LOG.info("Error response from HTTP request=" + resp + 
-        ";ec=" + exceptionClass + ";em="+exceptionMsg);
-    
-    if(exceptionClass == null || exceptionClass.isEmpty())
-      return null;
-    
-    // recreate exception objects
-    try {
-      Class<? extends Exception> ec = 
-         Class.forName(exceptionClass).asSubclass(Exception.class);
-      // we are interested in constructor with String arguments
-      java.lang.reflect.Constructor<? extends Exception> constructor =
-          ec.getConstructor (new Class[] {String.class});
-
-      // create an instance
-      e =  (IOException) constructor.newInstance (exceptionMsg);
-
-    } catch (Exception ee)  {
-      LOG.warn("failed to create object of this class", ee);
-    }
-    if(e == null)
-      return null;
-    
-    e.setStackTrace(new StackTraceElement[0]); // local stack is not relevant
-    LOG.info("Exception from HTTP response=" + e.getLocalizedMessage());
-    return e;
+  private static void printUsage(PrintStream err) {
+    err.println("fetchdt retrieves delegation tokens from the NameNode");
+    err.println();
+    err.println("fetchdt <opts> <token file>");
+    err.println("Options:");
+    err.println("  --webservice <url>  Url to contact NN on (starts with " +
+            "http:// or https://)");
+    err.println("  --renewer <name>    Name of the delegation token renewer");
+    err.println("  --cancel            Cancel the delegation token");
+    err.println("  --renew             Renew the delegation token.  " +
+            "Delegation " + "token must have been fetched using the --renewer" 
+
+            " <name> option.");
+    err.println("  --print             Print the delegation token");
+    err.println();
+    GenericOptionsParser.printGenericCommandUsage(err);
+    ExitUtil.terminate(1);
   }
 
-  private static HttpURLConnection run(URLConnectionFactory factory, URL url)
-      throws IOException, AuthenticationException {
-    HttpURLConnection conn = null;
-
-    try {
-      conn = (HttpURLConnection) factory.openConnection(url, true);
-      if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) {
-        String msg = conn.getResponseMessage();
-
-        throw new IOException("Error when dealing remote token: " + msg);
-      }
-    } catch (IOException ie) {
-      LOG.info("Error when dealing remote token:", ie);
-      IOException e = getExceptionFromResponse(conn);
-
-      if (e != null) {
-        LOG.info("rethrowing exception from HTTP request: "
-            + e.getLocalizedMessage());
-        throw e;
-      }
-      throw ie;
-    }
-    return conn;
+  private static Collection<Token<?>> readTokens(Path file, Configuration conf)
+          throws IOException {
+    Credentials creds = Credentials.readTokenStorageFile(file, conf);
+    return creds.getAllTokens();
   }
 }
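
The rewritten fetcher above routes --webservice URLs through the ordinary
FileSystem machinery: getFileSystem() translates http:// to webhdfs:// and
https:// to swebhdfs:// for backward compatibility. A hedged usage sketch
(the NameNode address and file names are placeholders; fetchdt is typically
invoked through the hdfs script):

    # http:// is rewritten to webhdfs:// internally by getFileSystem()
    hdfs fetchdt --webservice http://namenode:50070 --renewer alice /tmp/token.dat
    hdfs fetchdt --print /tmp/token.dat     # print the stored token
    hdfs fetchdt --renew /tmp/token.dat     # renew it (token must have a renewer)
    hdfs fetchdt --cancel /tmp/token.dat    # cancel it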

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java Wed Apr  2 17:28:08 2014
@@ -57,9 +57,7 @@ final class TokenAspect<T extends FileSy
 
     @Override
     public boolean handleKind(Text kind) {
-      return kind.equals(HftpFileSystem.TOKEN_KIND)
-          || kind.equals(HsftpFileSystem.TOKEN_KIND)
-          || kind.equals(WebHdfsFileSystem.TOKEN_KIND)
+      return kind.equals(WebHdfsFileSystem.TOKEN_KIND)
           || kind.equals(SWebHdfsFileSystem.TOKEN_KIND);
     }
 
@@ -89,11 +87,7 @@ final class TokenAspect<T extends FileSy
     }
 
     private static String getSchemeByKind(Text kind) {
-      if (kind.equals(HftpFileSystem.TOKEN_KIND)) {
-        return HftpFileSystem.SCHEME;
-      } else if (kind.equals(HsftpFileSystem.TOKEN_KIND)) {
-        return HsftpFileSystem.SCHEME;
-      } else if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
+      if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
         return WebHdfsFileSystem.SCHEME;
       } else if (kind.equals(SWebHdfsFileSystem.TOKEN_KIND)) {
         return SWebHdfsFileSystem.SCHEME;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem Wed Apr  2 17:28:08 2014
@@ -14,7 +14,5 @@
 # limitations under the License.
 
 org.apache.hadoop.hdfs.DistributedFileSystem
-org.apache.hadoop.hdfs.web.HftpFileSystem
-org.apache.hadoop.hdfs.web.HsftpFileSystem
 org.apache.hadoop.hdfs.web.WebHdfsFileSystem
 org.apache.hadoop.hdfs.web.SWebHdfsFileSystem
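
Hadoop discovers FileSystem implementations via java.util.ServiceLoader entries
in this file, so deleting the two lines above unregisters the hftp and hsftp
schemes. A sketch of the observable effect (host, port, and class name are
placeholders):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class SchemeLookupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Still registered above: resolves to WebHdfsFileSystem.
        FileSystem.get(URI.create("webhdfs://namenode:50070"), conf);
        // No longer registered: expected to fail with an IOException along the
        // lines of "No FileSystem for scheme: hftp" unless fs.hftp.impl is set.
        FileSystem.get(URI.create("hftp://namenode:50070"), conf);
      }
    }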

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Apr  2 17:28:08 2014
@@ -93,7 +93,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
@@ -1889,36 +1888,6 @@ public class MiniDFSCluster {
         + nameNodes[nnIndex].conf
             .get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
   }
-  
-  /**
-   * @return a {@link HftpFileSystem} object.
-   */
-  public HftpFileSystem getHftpFileSystem(int nnIndex) throws IOException {
-    String uri = "hftp://";
-        + nameNodes[nnIndex].conf
-            .get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
-    try {
-      return (HftpFileSystem)FileSystem.get(new URI(uri), conf);
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  /**
-   *  @return a {@link HftpFileSystem} object as specified user. 
-   */
-  public HftpFileSystem getHftpFileSystemAs(final String username,
-      final Configuration conf, final int nnIndex, final String... groups)
-      throws IOException, InterruptedException {
-    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
-        username, groups);
-    return ugi.doAs(new PrivilegedExceptionAction<HftpFileSystem>() {
-      @Override
-      public HftpFileSystem run() throws Exception {
-        return getHftpFileSystem(nnIndex);
-      }
-    });
-  }
 
   /**
    * Get the directories where the namenode stores its image.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Wed Apr  2 17:28:08 2014
@@ -63,7 +63,6 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -495,8 +494,6 @@ public class TestDistributedFileSystem {
 
   @Test
   public void testFileChecksum() throws Exception {
-    ((Log4JLogger)HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);
-
     final long seed = RAN.nextLong();
     System.out.println("seed=" + seed);
     RAN.setSeed(seed);
@@ -530,17 +527,6 @@ public class TestDistributedFileSystem {
       assertTrue("Not throwing the intended exception message", e.getMessage()
           .contains("Path is not a file: /test/TestExistingDir"));
     }
-    
-    //hftp
-    final String hftpuri = "hftp://" + nnAddr;
-    System.out.println("hftpuri=" + hftpuri);
-    final FileSystem hftp = ugi.doAs(
-        new PrivilegedExceptionAction<FileSystem>() {
-      @Override
-      public FileSystem run() throws Exception {
-        return new Path(hftpuri).getFileSystem(conf);
-      }
-    });
 
     //webhdfs
     final String webhdfsuri = WebHdfsFileSystem.SCHEME  + "://" + nnAddr;
@@ -578,14 +564,6 @@ public class TestDistributedFileSystem {
       final FileChecksum hdfsfoocs = hdfs.getFileChecksum(foo);
       System.out.println("hdfsfoocs=" + hdfsfoocs);
 
-      //hftp
-      final FileChecksum hftpfoocs = hftp.getFileChecksum(foo);
-      System.out.println("hftpfoocs=" + hftpfoocs);
-
-      final Path qualified = new Path(hftpuri + dir, "foo" + n);
-      final FileChecksum qfoocs = hftp.getFileChecksum(qualified);
-      System.out.println("qfoocs=" + qfoocs);
-
       //webhdfs
       final FileChecksum webhdfsfoocs = webhdfs.getFileChecksum(foo);
       System.out.println("webhdfsfoocs=" + webhdfsfoocs);
@@ -624,13 +602,6 @@ public class TestDistributedFileSystem {
         assertEquals(hdfsfoocs.hashCode(), barhashcode);
         assertEquals(hdfsfoocs, barcs);
 
-        //hftp
-        assertEquals(hftpfoocs.hashCode(), barhashcode);
-        assertEquals(hftpfoocs, barcs);
-
-        assertEquals(qfoocs.hashCode(), barhashcode);
-        assertEquals(qfoocs, barcs);
-
         //webhdfs
         assertEquals(webhdfsfoocs.hashCode(), barhashcode);
         assertEquals(webhdfsfoocs, barcs);
@@ -640,14 +611,6 @@ public class TestDistributedFileSystem {
       }
 
       hdfs.setPermission(dir, new FsPermission((short)0));
-      { //test permission error on hftp 
-        try {
-          hftp.getFileChecksum(qualified);
-          fail();
-        } catch(IOException ioe) {
-          FileSystem.LOG.info("GOOD: getting an exception", ioe);
-        }
-      }
 
       { //test permission error on webhdfs 
         try {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java Wed Apr  2 17:28:08 2014
@@ -39,7 +39,6 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
@@ -64,7 +63,6 @@ public class TestFileStatus {
   private static MiniDFSCluster cluster;
   private static FileSystem fs;
   private static FileContext fc;
-  private static HftpFileSystem hftpfs; 
   private static DFSClient dfsClient;
   private static Path file1;
   
@@ -75,7 +73,6 @@ public class TestFileStatus {
     cluster = new MiniDFSCluster.Builder(conf).build();
     fs = cluster.getFileSystem();
     fc = FileContext.getFileContext(cluster.getURI(0), conf);
-    hftpfs = cluster.getHftpFileSystem(0);
     dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
     file1 = new Path("filestatus.dat");
     DFSTestUtil.createFile(fs, file1, fileSize, fileSize, blockSize, (short) 1,
@@ -208,8 +205,6 @@ public class TestFileStatus {
     assertEquals(dir + " should be empty", 0, stats.length);
     assertEquals(dir + " should be zero size ",
         0, fs.getContentSummary(dir).getLength());
-    assertEquals(dir + " should be zero size using hftp",
-        0, hftpfs.getContentSummary(dir).getLength());
     
     RemoteIterator<FileStatus> itor = fc.listStatus(dir);
     assertFalse(dir + " should be empty", itor.hasNext());
@@ -239,9 +234,7 @@ public class TestFileStatus {
     final int expected = blockSize/2;  
     assertEquals(dir + " size should be " + expected, 
         expected, fs.getContentSummary(dir).getLength());
-    assertEquals(dir + " size should be " + expected + " using hftp", 
-        expected, hftpfs.getContentSummary(dir).getLength());
-    
+
     // Test listStatus on a non-empty directory
     stats = fs.listStatus(dir);
     assertEquals(dir + " should have two entries", 2, stats.length);
@@ -290,19 +283,9 @@ public class TestFileStatus {
     assertEquals(dir5.toString(), itor.next().getPath().toString());
     assertEquals(file2.toString(), itor.next().getPath().toString());
     assertEquals(file3.toString(), itor.next().getPath().toString());
+
     assertFalse(itor.hasNext());      
 
-    { //test permission error on hftp 
-      fs.setPermission(dir, new FsPermission((short)0));
-      try {
-        final String username = UserGroupInformation.getCurrentUser().getShortUserName() + "1";
-        final HftpFileSystem hftp2 = cluster.getHftpFileSystemAs(username, conf, 0, "somegroup");
-        hftp2.getContentSummary(dir);
-        fail();
-      } catch(IOException ioe) {
-        FileSystem.LOG.info("GOOD: getting an exception", ioe);
-      }
-    }
     fs.delete(dir, true);
   }
 }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java Wed Apr  2 17:28:08 2014
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.AccessControlException;
@@ -221,30 +220,6 @@ public class TestAuditLogs {
     assertTrue("failed to stat file", st != null && st.isFile());
   }
 
-  /** test that access via Hftp puts proper entry in audit log */
-  @Test
-  public void testAuditHftp() throws Exception {
-    final Path file = new Path(fnames[0]);
-
-    final String hftpUri =
-      "hftp://"; + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
-
-    HftpFileSystem hftpFs = null;
-
-    setupAuditLogs();
-    try {
-      hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(conf);
-      InputStream istream = hftpFs.open(file);
-      @SuppressWarnings("unused")
-      int val = istream.read();
-      istream.close();
-
-      verifyAuditLogs(true);
-    } finally {
-      if (hftpFs != null) hftpFs.close();
-    }
-  }
-
   /** test that denied access via webhdfs puts proper entry in audit log */
   @Test
   public void testAuditWebHdfsDenied() throws Exception {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java Wed Apr  2 17:28:08 2014
@@ -83,16 +83,6 @@ public class TestHttpsFileSystem {
   }
 
   @Test
-  public void testHsftpFileSystem() throws Exception {
-    FileSystem fs = FileSystem.get(new URI("hsftp://" + nnAddr), conf);
-    Assert.assertTrue(fs.exists(new Path("/test")));
-    InputStream is = fs.open(new Path("/test"));
-    Assert.assertEquals(23, is.read());
-    is.close();
-    fs.close();
-  }
-
-  @Test
   public void testSWebHdfsFileSystem() throws Exception {
     FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, "swebhdfs");
     final Path f = new Path("/testswebhdfs");

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java?rev=1584100&r1=1584099&r2=1584100&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java Wed Apr  2 17:28:08 2014
@@ -27,7 +27,7 @@ import org.apache.hadoop.security.token.
 public class FakeRenewer extends TokenRenewer {
   static Token<?> lastRenewed = null;
   static Token<?> lastCanceled = null;
-  static final Text KIND = new Text("TESTING-TOKEN-KIND");
+  public static final Text KIND = new Text("TESTING-TOKEN-KIND");
 
   @Override
   public boolean handleKind(Text kind) {
@@ -54,4 +54,12 @@ public class FakeRenewer extends TokenRe
     lastRenewed = null;
     lastCanceled = null;
   }
+
+  public static Token<?> getLastRenewed() {
+    return lastRenewed;
+  }
+
+  public static Token<?> getLastCanceled() {
+    return lastCanceled;
+  }
 }
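
Making KIND public and adding the two accessors lets tests outside this package
assert on renew/cancel behavior without reaching into package-private state. A
hedged sketch of such a check (the token construction is illustrative and
assumes FakeRenewer is registered as a TokenRenewer service on the test
classpath):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;
    import org.apache.hadoop.tools.FakeRenewer;

    public class FakeRenewerSketch {
      public static void main(String[] args) throws Exception {
        Token<TokenIdentifier> token = new Token<TokenIdentifier>(
            new byte[0], new byte[0], FakeRenewer.KIND, new Text("127.0.0.1:8020"));
        token.renew(new Configuration());   // dispatches to FakeRenewer.renew()
        System.out.println(token.equals(FakeRenewer.getLastRenewed()));  // true
      }
    }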

