Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
 Thu Dec  5 23:41:09 2013
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -37,6 +39,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.web.AuthFilter;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -60,76 +63,116 @@ public class NameNodeHttpServer {
   public static final String FSIMAGE_ATTRIBUTE_KEY = "name.system.image";
   protected static final String NAMENODE_ATTRIBUTE_KEY = "name.node";
   public static final String STARTUP_PROGRESS_ATTRIBUTE_KEY = "startup.progress";
-  
-  public NameNodeHttpServer(
-      Configuration conf,
-      NameNode nn,
+
+  NameNodeHttpServer(Configuration conf, NameNode nn,
       InetSocketAddress bindAddress) {
     this.conf = conf;
     this.nn = nn;
     this.bindAddress = bindAddress;
   }
-  
+
+  private void initWebHdfs(Configuration conf) throws IOException {
+    if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) {
+      //add SPNEGO authentication filter for webhdfs
+      final String name = "SPNEGO";
+      final String classname = AuthFilter.class.getName();
+      final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
+      Map<String, String> params = getAuthFilterParams(conf);
+      HttpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
+          new String[]{pathSpec});
+      HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
+
+      // add webhdfs packages
+      httpServer.addJerseyResourcePackage(
+          NamenodeWebHdfsMethods.class.getPackage().getName()
+              + ";" + Param.class.getPackage().getName(), pathSpec);
+    }
+  }
+
+  /**
+   * @see DFSUtil#getHttpPolicy(org.apache.hadoop.conf.Configuration)
+   * for information on the supported configuration options and how the
+   * HTTP policy is decided.
+   */
   void start() throws IOException {
+    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
     final String infoHost = bindAddress.getHostName();
-    int infoPort = bindAddress.getPort();
-    HttpServer.Builder builder = new HttpServer.Builder().setName("hdfs")
-        .addEndpoint(URI.create(("http://" + NetUtils.getHostPortString(bindAddress))))
-        .setFindPort(infoPort == 0).setConf(conf).setACL(
-            new AccessControlList(conf.get(DFS_ADMIN, " ")))
+
+    HttpServer.Builder builder = new HttpServer.Builder()
+        .setName("hdfs")
+        .setConf(conf)
+        .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
         .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
         .setUsernameConfKey(
             DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
-        .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
-            DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
+        .setKeytabConfKey(
+            DFSUtil.getSpnegoKeytabKey(conf,
+                DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
+
+    if (policy.isHttpEnabled()) {
+      int port = bindAddress.getPort();
+      if (port == 0) {
+        builder.setFindPort(true);
+      }
+      builder.addEndpoint(URI.create("http://"; + infoHost + ":" + port));
+    }
 
-    boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
-    if (certSSL) {
-      httpsAddress = NetUtils.createSocketAddr(conf.get(
+    if (policy.isHttpsEnabled()) {
+      final String httpsAddrString = conf.get(
           DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
-          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
+          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
+      InetSocketAddress addr = NetUtils.createSocketAddr(httpsAddrString);
 
-      builder.addEndpoint(URI.create("https://";
-          + NetUtils.getHostPortString(httpsAddress)));
       Configuration sslConf = new Configuration(false);
-      sslConf.setBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf
-          .getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
-              DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
+
+      sslConf.addResource(conf.get(
+          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
+
       sslConf.addResource(conf.get(
           DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
           DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
+      sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf.getBoolean(
+          DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
       DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
+
+      if (addr.getPort() == 0) {
+        builder.setFindPort(true);
+      }
+
+      builder.addEndpoint(URI.create("https://";
+          + NetUtils.getHostPortString(addr)));
     }
 
     httpServer = builder.build();
-    if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) {
-      //add SPNEGO authentication filter for webhdfs
-      final String name = "SPNEGO";
-      final String classname = AuthFilter.class.getName();
-      final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
-      Map<String, String> params = getAuthFilterParams(conf);
-      HttpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
-          new String[]{pathSpec});
-      HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
 
-      // add webhdfs packages
-      httpServer.addJerseyResourcePackage(
-          NamenodeWebHdfsMethods.class.getPackage().getName()
-          + ";" + Param.class.getPackage().getName(), pathSpec);
-      }
+    if (policy.isHttpsEnabled()) {
+      // assume same ssl port for all datanodes
+      InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
+          DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":"
+              + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
+      httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
+          datanodeSslPort.getPort());
+    }
+
+    initWebHdfs(conf);
 
     httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
     httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
     setupServlets(httpServer, conf);
     httpServer.start();
-    httpAddress = httpServer.getConnectorAddress(0);
-    if (certSSL) {
-      httpsAddress = httpServer.getConnectorAddress(1);
-      // assume same ssl port for all datanodes
-      InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
-        DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
-      httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, datanodeSslPort
-        .getPort());
+
+    int connIdx = 0;
+    if (policy.isHttpEnabled()) {
+      httpAddress = httpServer.getConnectorAddress(connIdx++);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpAddress));
+    }
+
+    if (policy.isHttpsEnabled()) {
+      httpsAddress = httpServer.getConnectorAddress(connIdx);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpsAddress));
     }
   }
   
@@ -165,18 +208,17 @@ public class NameNodeHttpServer {
     return params;
   }
 
-
-  public void stop() throws Exception {
+  void stop() throws Exception {
     if (httpServer != null) {
       httpServer.stop();
     }
   }
 
-  public InetSocketAddress getHttpAddress() {
+  InetSocketAddress getHttpAddress() {
     return httpAddress;
   }
 
-  public InetSocketAddress getHttpsAddress() {
+  InetSocketAddress getHttpsAddress() {
     return httpsAddress;
   }
 
@@ -185,7 +227,7 @@ public class NameNodeHttpServer {
    * 
    * @param fsImage FSImage to set
    */
-  public void setFSImage(FSImage fsImage) {
+  void setFSImage(FSImage fsImage) {
     httpServer.setAttribute(FSIMAGE_ATTRIBUTE_KEY, fsImage);
   }
 
@@ -194,7 +236,7 @@ public class NameNodeHttpServer {
    * 
    * @param nameNodeAddress InetSocketAddress to set
    */
-  public void setNameNodeAddress(InetSocketAddress nameNodeAddress) {
+  void setNameNodeAddress(InetSocketAddress nameNodeAddress) {
     httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
         NetUtils.getConnectAddress(nameNodeAddress));
   }
@@ -204,7 +246,7 @@ public class NameNodeHttpServer {
    * 
    * @param prog StartupProgress to set
    */
-  public void setStartupProgress(StartupProgress prog) {
+  void setStartupProgress(StartupProgress prog) {
     httpServer.setAttribute(STARTUP_PROGRESS_ATTRIBUTE_KEY, prog);
   }
 
@@ -234,7 +276,7 @@ public class NameNodeHttpServer {
         ContentSummaryServlet.class, false);
   }
 
-  public static FSImage getFsImageFromContext(ServletContext context) {
+  static FSImage getFsImageFromContext(ServletContext context) {
     return (FSImage)context.getAttribute(FSIMAGE_ATTRIBUTE_KEY);
   }
 
@@ -242,7 +284,7 @@ public class NameNodeHttpServer {
     return (NameNode)context.getAttribute(NAMENODE_ATTRIBUTE_KEY);
   }
 
-  public static Configuration getConfFromContext(ServletContext context) {
+  static Configuration getConfFromContext(ServletContext context) {
     return (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
   }
 
@@ -258,7 +300,7 @@ public class NameNodeHttpServer {
    * @param context ServletContext to get
    * @return StartupProgress associated with context
    */
-  public static StartupProgress getStartupProgressFromContext(
+  static StartupProgress getStartupProgressFromContext(
       ServletContext context) {
     return (StartupProgress)context.getAttribute(STARTUP_PROGRESS_ATTRIBUTE_KEY);
   }

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 Thu Dec  5 23:41:09 2013
@@ -36,7 +36,6 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BatchedRemoteIterator;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -46,8 +45,8 @@ import org.apache.hadoop.fs.InvalidPathE
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.ha.HAServiceStatus;
@@ -1254,36 +1253,13 @@ class NameNodeRpcServer implements Namen
     namesystem.removeCacheDirective(id);
   }
 
-  private class ServerSideCacheEntriesIterator 
-      extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {
-
-    private final CacheDirectiveInfo filter;
-    
-    public ServerSideCacheEntriesIterator (Long firstKey, 
-        CacheDirectiveInfo filter) {
-      super(firstKey);
-      this.filter = filter;
-    }
-
-    @Override
-    public BatchedEntries<CacheDirectiveEntry> makeRequest(
-        Long nextKey) throws IOException {
-      return namesystem.listCacheDirectives(nextKey, filter);
-    }
-
-    @Override
-    public Long elementToPrevKey(CacheDirectiveEntry entry) {
-      return entry.getInfo().getId();
-    }
-  }
-  
   @Override
-  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(long prevId,
+  public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(long prevId,
       CacheDirectiveInfo filter) throws IOException {
     if (filter == null) {
       filter = new CacheDirectiveInfo.Builder().build();
     }
-    return new ServerSideCacheEntriesIterator(prevId, filter);
+    return namesystem.listCacheDirectives(prevId, filter);
   }
 
   @Override
@@ -1301,29 +1277,10 @@ class NameNodeRpcServer implements Namen
     namesystem.removeCachePool(cachePoolName);
   }
 
-  private class ServerSideCachePoolIterator 
-      extends BatchedRemoteIterator<String, CachePoolEntry> {
-
-    public ServerSideCachePoolIterator(String prevKey) {
-      super(prevKey);
-    }
-
-    @Override
-    public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
-        throws IOException {
-      return namesystem.listCachePools(prevKey);
-    }
-
-    @Override
-    public String elementToPrevKey(CachePoolEntry entry) {
-      return entry.getInfo().getPoolName();
-    }
-  }
-
   @Override
-  public RemoteIterator<CachePoolEntry> listCachePools(String prevKey)
+  public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
       throws IOException {
-    return new ServerSideCachePoolIterator(prevKey);
+    return namesystem.listCachePools(prevKey != null ? prevKey : "");
   }
 }
 

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 Thu Dec  5 23:41:09 2013
@@ -30,7 +30,6 @@ import java.io.FilenameFilter;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
@@ -257,12 +256,7 @@ public class SecondaryNameNode implement
 
     // initialize the webserver for uploading files.
     int tmpInfoPort = infoSocAddr.getPort();
-    URI httpEndpoint;
-    try {
-      httpEndpoint = new URI("http://" + NetUtils.getHostPortString(infoSocAddr));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
+    URI httpEndpoint = URI.create("http://" + NetUtils.getHostPortString(infoSocAddr));
 
     infoServer = new HttpServer.Builder().setName("secondary")
         .addEndpoint(httpEndpoint)
@@ -273,6 +267,7 @@ public class SecondaryNameNode implement
             DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
         .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
             DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)).build();
+
     infoServer.setAttribute("secondary.name.node", this);
     infoServer.setAttribute("name.system.image", checkpointImage);
     infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 Thu Dec  5 23:41:09 2013
@@ -249,8 +249,12 @@ public class NamenodeWebHdfsMethods {
         + Param.toSortedString("&", parameters);
     final String uripath = WebHdfsFileSystem.PATH_PREFIX + path;
 
-    final URI uri = new URI("http", null, dn.getHostName(), dn.getInfoPort(),
-        uripath, query, null);
+    final String scheme = request.getScheme();
+    int port = "http".equals(scheme) ? dn.getInfoPort() : dn
+        .getInfoSecurePort();
+    final URI uri = new URI(scheme, null, dn.getHostName(), port, uripath,
+        query, null);
+
     if (LOG.isTraceEnabled()) {
       LOG.trace("redirectURI=" + uri);
     }

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
 Thu Dec  5 23:41:09 2013
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.tools.TableListing.Justification;
 import org.apache.hadoop.ipc.RemoteException;
@@ -477,9 +478,10 @@ public class CacheAdmin extends Configur
           addField("EXPIRY", Justification.LEFT).
           addField("PATH", Justification.LEFT);
       if (printStats) {
-        tableBuilder.addField("NEEDED", Justification.RIGHT).
-                    addField("CACHED", Justification.RIGHT).
-                    addField("FILES", Justification.RIGHT);
+        tableBuilder.addField("BYTES_NEEDED", Justification.RIGHT).
+                    addField("BYTES_CACHED", Justification.RIGHT).
+                    addField("FILES_NEEDED", Justification.RIGHT).
+                    addField("FILES_CACHED", Justification.RIGHT);
       }
       TableListing tableListing = tableBuilder.build();
 
@@ -507,7 +509,8 @@ public class CacheAdmin extends Configur
         if (printStats) {
           row.add("" + stats.getBytesNeeded());
           row.add("" + stats.getBytesCached());
-          row.add("" + stats.getFilesAffected());
+          row.add("" + stats.getFilesNeeded());
+          row.add("" + stats.getFilesCached());
         }
         tableListing.addRow(row.toArray(new String[0]));
         numEntries++;
@@ -769,13 +772,14 @@ public class CacheAdmin extends Configur
 
     @Override
     public String getShortUsage() {
-      return "[" + getName() + " [name]]\n";
+      return "[" + getName() + " [-stats] [<name>]]\n";
     }
 
     @Override
     public String getLongUsage() {
       TableListing listing = getOptionDescriptionListing();
-      listing.addRow("[name]", "If specified, list only the named cache 
pool.");
+      listing.addRow("-stats", "Display additional cache pool statistics.");
+      listing.addRow("<name>", "If specified, list only the named cache 
pool.");
 
       return getShortUsage() + "\n" +
           WordUtils.wrap("Display information about one or more cache pools, " 
+
@@ -787,6 +791,7 @@ public class CacheAdmin extends Configur
     @Override
     public int run(Configuration conf, List<String> args) throws IOException {
       String name = StringUtils.popFirstNonOption(args);
+      final boolean printStats = StringUtils.popOption("-stats", args);
       if (!args.isEmpty()) {
         System.err.print("Can't understand arguments: " +
           Joiner.on(" ").join(args) + "\n");
@@ -794,28 +799,42 @@ public class CacheAdmin extends Configur
         return 1;
       }
       DistributedFileSystem dfs = getDFS(conf);
-      TableListing listing = new TableListing.Builder().
+      TableListing.Builder builder = new TableListing.Builder().
           addField("NAME", Justification.LEFT).
           addField("OWNER", Justification.LEFT).
           addField("GROUP", Justification.LEFT).
           addField("MODE", Justification.LEFT).
-          addField("WEIGHT", Justification.RIGHT).
-          build();
+          addField("WEIGHT", Justification.RIGHT);
+      if (printStats) {
+        builder.
+            addField("BYTES_NEEDED", Justification.RIGHT).
+            addField("BYTES_CACHED", Justification.RIGHT).
+            addField("FILES_NEEDED", Justification.RIGHT).
+            addField("FILES_CACHED", Justification.RIGHT);
+      }
+      TableListing listing = builder.build();
       int numResults = 0;
       try {
         RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
         while (iter.hasNext()) {
           CachePoolEntry entry = iter.next();
           CachePoolInfo info = entry.getInfo();
-          String[] row = new String[5];
+          LinkedList<String> row = new LinkedList<String>();
           if (name == null || info.getPoolName().equals(name)) {
-            row[0] = info.getPoolName();
-            row[1] = info.getOwnerName();
-            row[2] = info.getGroupName();
-            row[3] = info.getMode() != null ? info.getMode().toString() : null;
-            row[4] =
-                info.getWeight() != null ? info.getWeight().toString() : null;
-            listing.addRow(row);
+            row.add(info.getPoolName());
+            row.add(info.getOwnerName());
+            row.add(info.getGroupName());
+            row.add(info.getMode() != null ? info.getMode().toString() : null);
+            row.add(
+                info.getWeight() != null ? info.getWeight().toString() : null);
+            if (printStats) {
+              CachePoolStats stats = entry.getStats();
+              row.add(Long.toString(stats.getBytesNeeded()));
+              row.add(Long.toString(stats.getBytesCached()));
+              row.add(Long.toString(stats.getFilesNeeded()));
+              row.add(Long.toString(stats.getFilesCached()));
+            }
+            listing.addRow(row.toArray(new String[] {}));
             ++numResults;
             if (name != null) {
               break;

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
 Thu Dec  5 23:41:09 2013
@@ -380,8 +380,9 @@ message CacheDirectiveInfoExpirationProt
 message CacheDirectiveStatsProto {
   required int64 bytesNeeded = 1;
   required int64 bytesCached = 2;
-  required int64 filesAffected = 3;
-  required bool hasExpired = 4;
+  required int64 filesNeeded = 3;
+  required int64 filesCached = 4;
+  required bool hasExpired = 5;
 }
 
 message AddCacheDirectiveRequestProto {
@@ -432,7 +433,8 @@ message CachePoolInfoProto {
 message CachePoolStatsProto {
   required int64 bytesNeeded = 1;
   required int64 bytesCached = 2;
-  required int64 filesAffected = 3;
+  required int64 filesNeeded = 3;
+  required int64 filesCached = 4;
 }
 
 message AddCachePoolRequestProto {

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
 Thu Dec  5 23:41:09 2013
@@ -137,7 +137,20 @@
 <property>
   <name>dfs.https.enable</name>
   <value>false</value>
+  <description>
+    Deprecated. Use "dfs.http.policy" instead.
+  </description>
+</property>
+
+<property>
+  <name>dfs.http.policy</name>
+  <value>HTTP_ONLY</value>
   <description>Decide if HTTPS(SSL) is supported on HDFS
+    This configures the HTTP endpoint for HDFS daemons:
+      The following values are supported:
+      - HTTP_ONLY : Service is provided only on http
+      - HTTPS_ONLY : Service is provided only on https
+      - HTTP_AND_HTTPS : Service is provided both on http and https
   </description>
 </property>
 

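For readers following the dfs.http.policy change above, the following is a minimal sketch (not part of this commit) of how the new policy value is resolved and consumed. It uses only APIs that appear elsewhere in this diff (DFSUtil.getHttpPolicy, HttpConfig.Policy, DFSConfigKeys.DFS_HTTP_POLICY_KEY); the class name HttpPolicyExample is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.http.HttpConfig;

    // Hypothetical illustration, not part of the commit.
    public class HttpPolicyExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Accepted values per hdfs-default.xml above:
        // HTTP_ONLY (default), HTTPS_ONLY, HTTP_AND_HTTPS.
        conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());

        HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
        if (policy.isHttpEnabled()) {
          System.out.println("daemon would expose an http:// endpoint");
        }
        if (policy.isHttpsEnabled()) {
          System.out.println("daemon would expose an https:// endpoint");
        }
      }
    }
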
Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 Thu Dec  5 23:41:09 2013
@@ -33,6 +33,7 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
@@ -906,12 +907,17 @@ public class MiniDFSCluster {
     
     // After the NN has started, set back the bound ports into
     // the conf
-    conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId),
-        nn.getNameNodeAddressHostPortString());
-    conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId), NetUtils
-        .getHostPortString(nn.getHttpAddress()));
+    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
+        nameserviceId, nnId), nn.getNameNodeAddressHostPortString());
+    if (nn.getHttpAddress() != null) {
+      conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY,
+          nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpAddress()));
+    }
+    if (nn.getHttpsAddress() != null) {
+      conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+          nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpsAddress()));
+    }
+
     DFSUtil.setGenericConf(conf, nameserviceId, nnId,
         DFS_NAMENODE_HTTP_ADDRESS_KEY);
     nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId,
@@ -1188,9 +1194,8 @@ public class MiniDFSCluster {
 
       SecureResources secureResources = null;
       if (UserGroupInformation.isSecurityEnabled()) {
-        SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, dnConf);
         try {
-          secureResources = SecureDataNodeStarter.getSecureResources(sslFactory, dnConf);
+          secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
         } catch (Exception ex) {
           ex.printStackTrace();
         }

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
 Thu Dec  5 23:41:09 2013
@@ -158,9 +158,8 @@ public class MiniDFSClusterWithNodeGroup
       
       SecureResources secureResources = null;
       if (UserGroupInformation.isSecurityEnabled()) {
-        SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, dnConf);
         try {
-          secureResources = SecureDataNodeStarter.getSecureResources(sslFactory, dnConf);
+          secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
         } catch (Exception ex) {
           ex.printStackTrace();
         }

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
 Thu Dec  5 23:41:09 2013
@@ -57,10 +57,12 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
+import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -622,45 +624,111 @@ public class TestCacheDirectives {
     }, 500, 60000);
   }
 
-  private static void waitForCachedStats(final DistributedFileSystem dfs,
-      final long targetFilesAffected, final long targetBytesNeeded,
-        final long targetBytesCached,
-          final CacheDirectiveInfo filter, final String infoString)
+  private static void waitForCacheDirectiveStats(final DistributedFileSystem dfs,
+      final long targetBytesNeeded, final long targetBytesCached,
+      final long targetFilesNeeded, final long targetFilesCached,
+      final CacheDirectiveInfo filter, final String infoString)
             throws Exception {
-      LOG.info("Polling listDirectives{" + 
-          ((filter == null) ? "ALL" : filter.toString()) +
-          " for " + targetFilesAffected + " targetFilesAffected, " +
-          targetBytesNeeded + " targetBytesNeeded, " +
-          targetBytesCached + " targetBytesCached");
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override
-        public Boolean get() {
-          RemoteIterator<CacheDirectiveEntry> iter = null;
-          CacheDirectiveEntry entry = null;
+    LOG.info("Polling listCacheDirectives " + 
+        ((filter == null) ? "ALL" : filter.toString()) + " for " +
+        targetBytesNeeded + " targetBytesNeeded, " +
+        targetBytesCached + " targetBytesCached, " +
+        targetFilesNeeded + " targetFilesNeeded, " +
+        targetFilesCached + " targetFilesCached");
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        RemoteIterator<CacheDirectiveEntry> iter = null;
+        CacheDirectiveEntry entry = null;
+        try {
+          iter = dfs.listCacheDirectives(filter);
+          entry = iter.next();
+        } catch (IOException e) {
+          fail("got IOException while calling " +
+              "listCacheDirectives: " + e.getMessage());
+        }
+        Assert.assertNotNull(entry);
+        CacheDirectiveStats stats = entry.getStats();
+        if ((targetBytesNeeded == stats.getBytesNeeded()) &&
+            (targetBytesCached == stats.getBytesCached()) &&
+            (targetFilesNeeded == stats.getFilesNeeded()) &&
+            (targetFilesCached == stats.getFilesCached())) {
+          return true;
+        } else {
+          LOG.info(infoString + ": " +
+              "filesNeeded: " +
+              stats.getFilesNeeded() + "/" + targetFilesNeeded +
+              ", filesCached: " + 
+              stats.getFilesCached() + "/" + targetFilesCached +
+              ", bytesNeeded: " +
+              stats.getBytesNeeded() + "/" + targetBytesNeeded +
+              ", bytesCached: " + 
+              stats.getBytesCached() + "/" + targetBytesCached);
+          return false;
+        }
+      }
+    }, 500, 60000);
+  }
+
+  private static void waitForCachePoolStats(final DistributedFileSystem dfs,
+      final long targetBytesNeeded, final long targetBytesCached,
+      final long targetFilesNeeded, final long targetFilesCached,
+      final CachePoolInfo pool, final String infoString)
+            throws Exception {
+    LOG.info("Polling listCachePools " + pool.toString() + " for " +
+        targetBytesNeeded + " targetBytesNeeded, " +
+        targetBytesCached + " targetBytesCached, " +
+        targetFilesNeeded + " targetFilesNeeded, " +
+        targetFilesCached + " targetFilesCached");
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        RemoteIterator<CachePoolEntry> iter = null;
+        try {
+          iter = dfs.listCachePools();
+        } catch (IOException e) {
+          fail("got IOException while calling " +
+              "listCachePools: " + e.getMessage());
+        }
+        while (true) {
+          CachePoolEntry entry = null;
           try {
-            iter = dfs.listCacheDirectives(filter);
+            if (!iter.hasNext()) {
+              break;
+            }
             entry = iter.next();
           } catch (IOException e) {
-            fail("got IOException while calling " +
-                "listCacheDirectives: " + e.getMessage());
+            fail("got IOException while iterating through " +
+                "listCachePools: " + e.getMessage());
+          }
+          if (entry == null) {
+            break;
+          }
+          if (!entry.getInfo().getPoolName().equals(pool.getPoolName())) {
+            continue;
           }
-          Assert.assertNotNull(entry);
-          CacheDirectiveStats stats = entry.getStats();
-          if ((targetFilesAffected == stats.getFilesAffected()) &&
-              (targetBytesNeeded == stats.getBytesNeeded()) &&
-              (targetBytesCached == stats.getBytesCached())) {
+          CachePoolStats stats = entry.getStats();
+          if ((targetBytesNeeded == stats.getBytesNeeded()) &&
+              (targetBytesCached == stats.getBytesCached()) &&
+              (targetFilesNeeded == stats.getFilesNeeded()) &&
+              (targetFilesCached == stats.getFilesCached())) {
             return true;
           } else {
-            LOG.info(infoString + ": filesAffected: " + 
-              stats.getFilesAffected() + "/" + targetFilesAffected +
-              ", bytesNeeded: " +
+            LOG.info(infoString + ": " +
+                "filesNeeded: " +
+                stats.getFilesNeeded() + "/" + targetFilesNeeded +
+                ", filesCached: " + 
+                stats.getFilesCached() + "/" + targetFilesCached +
+                ", bytesNeeded: " +
                 stats.getBytesNeeded() + "/" + targetBytesNeeded +
-              ", bytesCached: " + 
+                ", bytesCached: " + 
                 stats.getBytesCached() + "/" + targetBytesCached);
             return false;
           }
         }
-      }, 500, 60000);
+        return false;
+      }
+    }, 500, 60000);
   }
 
   private static void checkNumCachedReplicas(final DistributedFileSystem dfs,
@@ -763,7 +831,7 @@ public class TestCacheDirectives {
       }
       // Uncache and check each path in sequence
       RemoteIterator<CacheDirectiveEntry> entries =
-          nnRpc.listCacheDirectives(0, null);
+        new CacheDirectiveIterator(nnRpc, null);
       for (int i=0; i<numFiles; i++) {
         CacheDirectiveEntry entry = entries.next();
         nnRpc.removeCacheDirective(entry.getInfo().getId());
@@ -836,7 +904,8 @@ public class TestCacheDirectives {
       NameNode namenode = cluster.getNameNode();
       // Create the pool
       final String pool = "friendlyPool";
-      dfs.addCachePool(new CachePoolInfo(pool));
+      final CachePoolInfo poolInfo = new CachePoolInfo(pool);
+      dfs.addCachePool(poolInfo);
       // Create some test files
       final List<Path> paths = new LinkedList<Path>();
       paths.add(new Path("/foo/bar"));
@@ -852,6 +921,7 @@ public class TestCacheDirectives {
       }
       waitForCachedBlocks(namenode, 0, 0,
           "testWaitForCachedReplicasInDirectory:0");
+
       // cache entire directory
       long id = dfs.addCacheDirective(
             new CacheDirectiveInfo.Builder().
@@ -860,14 +930,20 @@ public class TestCacheDirectives {
               setPool(pool).
               build());
       waitForCachedBlocks(namenode, 4, 8,
-          "testWaitForCachedReplicasInDirectory:1");
+          "testWaitForCachedReplicasInDirectory:1:blocks");
       // Verify that listDirectives gives the stats we want.
-      waitForCachedStats(dfs, 2,
-          8 * BLOCK_SIZE, 8 * BLOCK_SIZE,
+      waitForCacheDirectiveStats(dfs,
+          4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
+          2, 2,
           new CacheDirectiveInfo.Builder().
               setPath(new Path("/foo")).
               build(),
-          "testWaitForCachedReplicasInDirectory:2");
+          "testWaitForCachedReplicasInDirectory:1:directive");
+      waitForCachePoolStats(dfs,
+          4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
+          2, 2,
+          poolInfo, "testWaitForCachedReplicasInDirectory:1:pool");
+
       long id2 = dfs.addCacheDirective(
             new CacheDirectiveInfo.Builder().
               setPath(new Path("/foo/bar")).
@@ -876,28 +952,42 @@ public class TestCacheDirectives {
               build());
       // wait for an additional 2 cached replicas to come up
       waitForCachedBlocks(namenode, 4, 10,
-          "testWaitForCachedReplicasInDirectory:3");
+          "testWaitForCachedReplicasInDirectory:2:blocks");
       // the directory directive's stats are unchanged
-      waitForCachedStats(dfs, 2,
-          8 * BLOCK_SIZE, 8 * BLOCK_SIZE,
+      waitForCacheDirectiveStats(dfs,
+          4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
+          2, 2,
           new CacheDirectiveInfo.Builder().
               setPath(new Path("/foo")).
               build(),
-          "testWaitForCachedReplicasInDirectory:4");
+          "testWaitForCachedReplicasInDirectory:2:directive-1");
       // verify /foo/bar's stats
-      waitForCachedStats(dfs, 1,
+      waitForCacheDirectiveStats(dfs,
           4 * numBlocksPerFile * BLOCK_SIZE,
           // only 3 because the file only has 3 replicas, not 4 as requested.
           3 * numBlocksPerFile * BLOCK_SIZE,
+          1,
+          // only 0 because the file can't be fully cached
+          0,
           new CacheDirectiveInfo.Builder().
               setPath(new Path("/foo/bar")).
               build(),
-          "testWaitForCachedReplicasInDirectory:5");
+          "testWaitForCachedReplicasInDirectory:2:directive-2");
+      waitForCachePoolStats(dfs,
+          (4+4) * numBlocksPerFile * BLOCK_SIZE,
+          (4+3) * numBlocksPerFile * BLOCK_SIZE,
+          3, 2,
+          poolInfo, "testWaitForCachedReplicasInDirectory:2:pool");
+
       // remove and watch numCached go to 0
       dfs.removeCacheDirective(id);
       dfs.removeCacheDirective(id2);
       waitForCachedBlocks(namenode, 0, 0,
-          "testWaitForCachedReplicasInDirectory:6");
+          "testWaitForCachedReplicasInDirectory:3:blocks");
+      waitForCachePoolStats(dfs,
+          0, 0,
+          0, 0,
+          poolInfo, "testWaitForCachedReplicasInDirectory:3:pool");
     } finally {
       cluster.shutdown();
     }

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
 Thu Dec  5 23:41:09 2013
@@ -158,4 +158,40 @@ public class TestFSNamesystem {
     fsNamesystem = new FSNamesystem(conf, fsImage);
     assertFalse(fsNamesystem.getFsLockForTests().isFair());
   }  
+  
+  @Test
+  public void testFSNamesystemLockCompatibility() {
+    FSNamesystemLock rwLock = new FSNamesystemLock(true);
+
+    assertEquals(0, rwLock.getReadHoldCount());
+    rwLock.readLock().lock();
+    assertEquals(1, rwLock.getReadHoldCount());
+
+    rwLock.readLock().lock();
+    assertEquals(2, rwLock.getReadHoldCount());
+
+    rwLock.readLock().unlock();
+    assertEquals(1, rwLock.getReadHoldCount());
+
+    rwLock.readLock().unlock();
+    assertEquals(0, rwLock.getReadHoldCount());
+
+    assertFalse(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(0, rwLock.getWriteHoldCount());
+    rwLock.writeLock().lock();
+    assertTrue(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(1, rwLock.getWriteHoldCount());
+    
+    rwLock.writeLock().lock();
+    assertTrue(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(2, rwLock.getWriteHoldCount());
+
+    rwLock.writeLock().unlock();
+    assertTrue(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(1, rwLock.getWriteHoldCount());
+
+    rwLock.writeLock().unlock();
+    assertFalse(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(0, rwLock.getWriteHoldCount());
+  }
 }

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
 Thu Dec  5 23:41:09 2013
@@ -22,6 +22,7 @@ import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.BindException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -50,7 +51,7 @@ public class TestValidateConfigurationSe
    * an exception
    * is thrown when trying to re-use the same port
    */
-  @Test
+  @Test(expected = BindException.class)
   public void testThatMatchingRPCandHttpPortsThrowException() 
       throws IOException {
 
@@ -63,14 +64,7 @@ public class TestValidateConfigurationSe
     FileSystem.setDefaultUri(conf, "hdfs://localhost:9000"); 
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:9000");
     DFSTestUtil.formatNameNode(conf);
-    try {
-      NameNode nameNode = new NameNode(conf);
-      fail("Should have throw the exception since the ports match");
-    } catch (IOException e) {
-      // verify we're getting the right IOException
-      assertTrue(e.toString().contains("dfs.namenode.rpc-address (")); 
-      System.out.println("Got expected exception: " + e.toString());
-    }
+    new NameNode(conf);
   }
 
   /**

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
 Thu Dec  5 23:41:09 2013
@@ -29,6 +29,7 @@ import java.net.URI;
 import java.net.UnknownHostException;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Random;
@@ -86,6 +87,7 @@ public class TestRetryCacheWithHA {
   private static final int BlockSize = 1024;
   private static final short DataNodes = 3;
   private static final int CHECKTIMES = 10;
+  private static final int ResponseSize = 3;
   
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
@@ -120,6 +122,8 @@ public class TestRetryCacheWithHA {
   @Before
   public void setup() throws Exception {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(DataNodes).build();
@@ -1176,4 +1180,92 @@ public class TestRetryCacheWithHA {
           + results.get(op.name));
     }
   }
+
+  /**
+   * Add a list of cache pools, list cache pools,
+   * switch active NN, and list cache pools again.
+   */
+  @Test (timeout=60000)
+  public void testListCachePools() throws Exception {
+    final int poolCount = 7;
+    HashSet<String> poolNames = new HashSet<String>(poolCount);
+    for (int i=0; i<poolCount; i++) {
+      String poolName = "testListCachePools-" + i;
+      dfs.addCachePool(new CachePoolInfo(poolName));
+      poolNames.add(poolName);
+    }
+    listCachePools(poolNames, 0);
+
+    cluster.transitionToStandby(0);
+    cluster.transitionToActive(1);
+    cluster.waitActive(1);
+    listCachePools(poolNames, 1);
+  }
+
+  /**
+   * Add a list of cache directives, list cache directives,
+   * switch active NN, and list cache directives again.
+   */
+  @Test (timeout=60000)
+  public void testListCacheDirectives() throws Exception {
+    final int poolCount = 7;
+    HashSet<String> poolNames = new HashSet<String>(poolCount);
+    Path path = new Path("/p");
+    for (int i=0; i<poolCount; i++) {
+      String poolName = "testListCacheDirectives-" + i;
+      CacheDirectiveInfo directiveInfo =
+        new 
CacheDirectiveInfo.Builder().setPool(poolName).setPath(path).build();
+      dfs.addCachePool(new CachePoolInfo(poolName));
+      dfs.addCacheDirective(directiveInfo);
+      poolNames.add(poolName);
+    }
+    listCacheDirectives(poolNames, 0);
+
+    cluster.transitionToStandby(0);
+    cluster.transitionToActive(1);
+    cluster.waitActive(1);
+    listCacheDirectives(poolNames, 1);
+  }
+
+  @SuppressWarnings("unchecked")
+  private void listCachePools(
+      HashSet<String> poolNames, int active) throws Exception {
+    HashSet<String> tmpNames = (HashSet<String>)poolNames.clone();
+    RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
+    int poolCount = poolNames.size();
+    for (int i=0; i<poolCount; i++) {
+      CachePoolEntry pool = pools.next();
+      String pollName = pool.getInfo().getPoolName();
+      assertTrue("The pool name should be expected", 
tmpNames.remove(pollName));
+      if (i % 2 == 0) {
+        int standby = active;
+        active = (standby == 0) ? 1 : 0;
+        cluster.transitionToStandby(standby);
+        cluster.transitionToActive(active);
+        cluster.waitActive(active);
+      }
+    }
+    assertTrue("All pools must be found", tmpNames.isEmpty());
+  }
+
+  @SuppressWarnings("unchecked")
+  private void listCacheDirectives(
+      HashSet<String> poolNames, int active) throws Exception {
+    HashSet<String> tmpNames = (HashSet<String>)poolNames.clone();
+    RemoteIterator<CacheDirectiveEntry> directives = dfs.listCacheDirectives(null);
+    int poolCount = poolNames.size();
+    for (int i=0; i<poolCount; i++) {
+      CacheDirectiveEntry directive = directives.next();
+      String pollName = directive.getInfo().getPool();
+      assertTrue("The pool name should be expected", 
tmpNames.remove(pollName));
+      if (i % 2 == 0) {
+        int standby = active;
+        active = (standby == 0) ? 1 : 0;
+        cluster.transitionToStandby(standby);
+        cluster.transitionToActive(active);
+        cluster.waitActive(active);
+      }
+    }
+    assertTrue("All pools must be found", tmpNames.isEmpty());
+  }
 }

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
 Thu Dec  5 23:41:09 2013
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -49,7 +50,7 @@ public class TestHttpsFileSystem {
   public static void setUp() throws Exception {
     conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
-    conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
+    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, 
HttpConfig.Policy.HTTPS_ONLY.name());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
 
     File base = new File(BASEDIR);

Modified: 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml
 (original)
+++ 
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml
 Thu Dec  5 23:41:09 2013
@@ -399,5 +399,63 @@
       </comparators>
     </test>
 
+    <test> <!--Tested -->
+      <description>Testing listing cache pool statistics</description>
+      <test-commands>
+        <cache-admin-command>-addPool foo -owner bob -group bob -mode 0664</cache-admin-command>
+        <cache-admin-command>-addPool bar -owner alice -group alicegroup -mode 0755</cache-admin-command>
+        <cache-admin-command>-listPools -stats</cache-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <cache-admin-command>-removePool foo</cache-admin-command>
+        <cache-admin-command>-removePool bar</cache-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Found 2 results.</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>bar   alice  alicegroup  rwxr-xr-x      100             0             0             0             0</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>foo   bob    bob         rw-rw-r--      100             0             0             0             0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>Testing listing cache directive statistics</description>
+      <test-commands>
+        <cache-admin-command>-addPool pool1</cache-admin-command>
+        <cache-admin-command>-addDirective -path /foo -pool pool1 -ttl 2d</cache-admin-command>
+        <cache-admin-command>-addDirective -path /bar -pool pool1 -ttl 24h</cache-admin-command>
+        <cache-admin-command>-addDirective -path /baz -replication 2 -pool pool1 -ttl 60m</cache-admin-command>
+        <cache-admin-command>-listDirectives -pool pool1 -stats</cache-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <cache-admin-command>-removePool pool1</cache-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Found 3 entries</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>/foo              0             0             0             0</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>/bar              0             0             0     
        0</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>/baz              0             0             0     
        0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
   </tests>
 </configuration>

