Author: wang
Date: Mon May 19 19:29:30 2014
New Revision: 1596000

URL: http://svn.apache.org/r1596000
Log:
Merge trunk r1595999 to branch.

Added:
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java
      - copied unchanged from r1595999, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java
      - copied unchanged from r1595999, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/test/resources/
      - copied from r1595999, hadoop/common/trunk/hadoop-common-project/hadoop-nfs/src/test/resources/
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties
      - copied unchanged from r1595999, hadoop/common/trunk/hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties
Modified:
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/docs/   (props changed)
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/test/core/   (props changed)
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java
    hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java

Modified: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1596000&r1=1595999&r2=1596000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/CHANGES.txt Mon May 19 19:29:30 2014
@@ -328,6 +328,8 @@ Trunk (Unreleased)
 
     HADOOP-10583. bin/hadoop key throws NPE with no args and assorted other fixups. (clamb via tucu)
 
+    HADOOP-10586. KeyShell doesn't allow setting Options via CLI. (clamb via tucu)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -388,6 +390,11 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10572. Example NFS mount command must pass noacl as it isn't
     supported by the server yet. (Harsh J via brandonli)
 
+    HADOOP-10609. .gitignore should ignore .orig and .rej files. (kasha)
+
+    HADOOP-10614. CBZip2InputStream is not threadsafe (Xiangrui Meng via
+    Sandy Ryza)
+
   OPTIMIZATIONS
 
   BUG FIXES 

Propchange: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/CHANGES.txt
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1594887-1595999

Propchange: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/docs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs:r1588992-1595999

Propchange: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1594887-1595999

Modified: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java?rev=1596000&r1=1595999&r2=1596000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java Mon May 19 19:29:30 2014
@@ -89,6 +89,8 @@ public class KeyShell extends Configured
    * @throws IOException
    */
   private int init(String[] args) throws IOException {
+    final Options options = KeyProvider.options(getConf());
+
     for (int i = 0; i < args.length; i++) { // parse command line
       boolean moreTokens = (i < args.length - 1);
       if (args[i].equals("create")) {
@@ -97,7 +99,7 @@ public class KeyShell extends Configured
           keyName = args[++i];
         }
 
-        command = new CreateCommand(keyName);
+        command = new CreateCommand(keyName, options);
         if ("--help".equals(keyName)) {
           printKeyShellUsage();
           return -1;
@@ -127,9 +129,11 @@ public class KeyShell extends Configured
       } else if ("list".equals(args[i])) {
         command = new ListCommand();
       } else if ("--size".equals(args[i]) && moreTokens) {
-        getConf().set(KeyProvider.DEFAULT_BITLENGTH_NAME, args[++i]);
+        options.setBitLength(Integer.parseInt(args[++i]));
       } else if ("--cipher".equals(args[i]) && moreTokens) {
-        getConf().set(KeyProvider.DEFAULT_CIPHER_NAME, args[++i]);
+        options.setCipher(args[++i]);
+      } else if ("--description".equals(args[i]) && moreTokens) {
+        options.setDescription(args[++i]);
       } else if ("--provider".equals(args[i]) && moreTokens) {
         userSuppliedProvider = true;
         getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]);
@@ -399,6 +403,7 @@ public class KeyShell extends Configured
   private class CreateCommand extends Command {
     public static final String USAGE =
       "create <keyname> [--cipher <cipher>] [--size <size>]\n" +
+      "                     [--description <description>]\n" +
       "                     [--provider <provider>] [--help]";
     public static final String DESC =
       "The create subcommand creates a new key for the name specified\n" +
@@ -408,10 +413,12 @@ public class KeyShell extends Configured
       "The default keysize is 256. You may specify the requested key\n" +
       "length using the --size argument.\n";
 
-    String keyName = null;
+    final String keyName;
+    final Options options;
 
-    public CreateCommand(String keyName) {
+    public CreateCommand(String keyName, Options options) {
       this.keyName = keyName;
+      this.options = options;
     }
 
     public boolean validate() {
@@ -434,7 +441,6 @@ public class KeyShell extends Configured
     public void execute() throws IOException, NoSuchAlgorithmException {
       warnIfTransientProvider();
       try {
-        Options options = KeyProvider.options(getConf());
         provider.createKey(keyName, options);
         out.println(keyName + " has been successfully created.");
         provider.flush();
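
The net effect of this hunk: --size, --cipher, and the new --description flag now populate a single KeyProvider.Options instance handed to CreateCommand, instead of being round-tripped through the Configuration. As an illustration, mirroring the arguments the new TestKeyShell case below uses (the keystore path is made up for the example):

    hadoop key create key1 --size 256 --description someDescription \
        --provider jceks://file/tmp/keystore.jceks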

Modified: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java?rev=1596000&r1=1595999&r2=1596000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java Mon May 19 19:29:30 2014
@@ -100,6 +100,21 @@ public class FileStatus implements Writa
   }
 
   /**
+   * Copy constructor.
+   *
+   * @param other FileStatus to copy
+   */
+  public FileStatus(FileStatus other) throws IOException {
+    // It's important to call the getters here instead of directly accessing the
+    // members.  Subclasses like ViewFsFileStatus can override the getters.
+    this(other.getLen(), other.isDirectory(), other.getReplication(),
+      other.getBlockSize(), other.getModificationTime(), other.getAccessTime(),
+      other.getPermission(), other.getOwner(), other.getGroup(),
+      (other.isSymlink() ? other.getSymlink() : null),
+      other.getPath());
+  }
+
+  /**
    * Get the length of this file, in bytes.
    * @return the length of this file, in bytes.
    */
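
The getter-based delegation called out in the comment is the point of the new copy constructor: copying through accessors means a subclass that overrides a getter (ViewFsFileStatus is the motivating case) is copied faithfully. A minimal sketch of that pattern; this particular subclass is hypothetical:

    // Hypothetical subclass overriding a getter. new FileStatus(instance)
    // copies the overridden value; direct field access would miss it.
    class FixedLenFileStatus extends FileStatus {
      private final long realLen;
      FixedLenFileStatus(FileStatus other, long realLen) throws IOException {
        super(other);
        this.realLen = realLen;
      }
      @Override
      public long getLen() {
        return realLen;
      }
    }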

Modified: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java?rev=1596000&r1=1595999&r2=1596000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java Mon May 19 19:29:30 2014
@@ -18,7 +18,7 @@
 package org.apache.hadoop.fs.shell;
 
 import java.io.IOException;
-import java.util.Iterator;
+import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
 
@@ -31,8 +31,10 @@ import org.apache.hadoop.fs.permission.A
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.ScopedAclEntries;
 
 /**
  * Acl related operations
@@ -84,67 +86,34 @@ class AclCommands extends FsCommand {
           (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T"));
       }
 
-      if (perm.getAclBit()) {
-        AclStatus aclStatus = item.fs.getAclStatus(item.path);
-        List<AclEntry> entries = aclStatus.getEntries();
-        printExtendedAcl(perm, entries);
-      } else {
-        printMinimalAcl(perm);
-      }
-
+      List<AclEntry> entries = perm.getAclBit() ?
+        item.fs.getAclStatus(item.path).getEntries() :
+        Collections.<AclEntry>emptyList();
+      ScopedAclEntries scopedEntries = new ScopedAclEntries(
+        AclUtil.getAclFromPermAndEntries(perm, entries));
+      printAclEntriesForSingleScope(scopedEntries.getAccessEntries());
+      printAclEntriesForSingleScope(scopedEntries.getDefaultEntries());
       out.println();
     }
 
     /**
-     * Prints an extended ACL, including all extended ACL entries and also the
-     * base entries implied by the permission bits.
+     * Prints all the ACL entries in a single scope.
      *
-     * @param perm FsPermission of file
      * @param entries List<AclEntry> containing ACL entries of file
      */
-    private void printExtendedAcl(FsPermission perm, List<AclEntry> entries) {
-      // Print owner entry implied by owner permission bits.
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.USER)
-        .setPermission(perm.getUserAction())
-        .build());
-
-      // Print all extended access ACL entries.
-      boolean hasAccessAcl = false;
-      Iterator<AclEntry> entryIter = entries.iterator();
-      AclEntry curEntry = null;
-      while (entryIter.hasNext()) {
-        curEntry = entryIter.next();
-        if (curEntry.getScope() == AclEntryScope.DEFAULT) {
-          break;
-        }
-        hasAccessAcl = true;
-        printExtendedAclEntry(curEntry, perm.getGroupAction());
+    private void printAclEntriesForSingleScope(List<AclEntry> entries) {
+      if (entries.isEmpty()) {
+        return;
       }
-
-      // Print mask entry implied by group permission bits, or print group entry
-      // if there is no access ACL (only default ACL).
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP)
-        .setPermission(perm.getGroupAction())
-        .build());
-
-      // Print other entry implied by other bits.
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.OTHER)
-        .setPermission(perm.getOtherAction())
-        .build());
-
-      // Print default ACL entries.
-      if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) {
-        out.println(curEntry);
-        // ACL sort order guarantees default mask is the second-to-last entry.
+      if (AclUtil.isMinimalAcl(entries)) {
+        for (AclEntry entry: entries) {
+          out.println(entry);
+        }
+      } else {
+        // ACL sort order guarantees mask is the second-to-last entry.
         FsAction maskPerm = entries.get(entries.size() - 2).getPermission();
-        while (entryIter.hasNext()) {
-          printExtendedAclEntry(entryIter.next(), maskPerm);
+        for (AclEntry entry: entries) {
+          printExtendedAclEntry(entry, maskPerm);
         }
       }
     }
@@ -172,30 +141,6 @@ class AclCommands extends FsCommand {
         out.println(entry);
       }
     }
-
-    /**
-     * Prints a minimal ACL, consisting of exactly 3 ACL entries implied by the
-     * permission bits.
-     *
-     * @param perm FsPermission of file
-     */
-    private void printMinimalAcl(FsPermission perm) {
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.USER)
-        .setPermission(perm.getUserAction())
-        .build());
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.GROUP)
-        .setPermission(perm.getGroupAction())
-        .build());
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.OTHER)
-        .setPermission(perm.getOtherAction())
-        .build());
-    }
   }
 
   /**

Modified: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java?rev=1596000&r1=1595999&r2=1596000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java Mon May 19 19:29:30 2014
@@ -129,7 +129,7 @@ public class CBZip2InputStream extends I
   private int computedBlockCRC, computedCombinedCRC;
 
   private boolean skipResult = false;// used by skipToNextMarker
-  private static boolean skipDecompression = false;
+  private boolean skipDecompression = false;
 
   // Variables used by setup* methods exclusively
 
@@ -281,12 +281,18 @@ public class CBZip2InputStream extends I
   */
   public CBZip2InputStream(final InputStream in, READ_MODE readMode)
       throws IOException {
+    this(in, readMode, false);
+  }
+
+  private CBZip2InputStream(final InputStream in, READ_MODE readMode, boolean skipDecompression)
+      throws IOException {
 
     super();
     int blockSize = 0X39;// i.e 9
     this.blockSize100k = blockSize - '0';
     this.in = new BufferedInputStream(in, 1024 * 9);// >1 MB buffer
     this.readMode = readMode;
+    this.skipDecompression = skipDecompression;
     if (readMode == READ_MODE.CONTINUOUS) {
       currentState = STATE.START_BLOCK_STATE;
       lazyInitialization = (in.available() == 0)?true:false;
@@ -316,11 +322,7 @@ public class CBZip2InputStream extends I
    *
    */
   public static long numberOfBytesTillNextMarker(final InputStream in) throws IOException{
-    CBZip2InputStream.skipDecompression = true;
-    CBZip2InputStream anObject = null;
-
-    anObject = new CBZip2InputStream(in, READ_MODE.BYBLOCK);
-
+    CBZip2InputStream anObject = new CBZip2InputStream(in, READ_MODE.BYBLOCK, true);
     return anObject.getProcessedByteCount();
   }
 
@@ -397,7 +399,7 @@ public class CBZip2InputStream extends I
 
     if(skipDecompression){
       changeStateToProcessABlock();
-      CBZip2InputStream.skipDecompression = false;
+      skipDecompression = false;
     }
 
     final int hi = offs + len;
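
For context on HADOOP-10614: the old static skipDecompression flag was shared by every instance in the process, so numberOfBytesTillNextMarker() setting it before construction could leak into, or be clobbered by, a concurrent constructor call on another thread. A generic sketch of that hazard (illustrative only, not the Hadoop class):

    // Static-flag race of the kind this patch removes.
    class Parser {
      static boolean skipMode = false;   // shared across ALL instances and threads
      final boolean skip;
      Parser() {
        skip = skipMode;                 // reads whatever another thread left here
      }
      static Parser skippingParser() {
        skipMode = true;                 // a Parser constructed concurrently in this
        Parser p = new Parser();         // window wrongly sees skip == true
        skipMode = false;
        return p;
      }
    }

Threading the flag through a private constructor parameter, as done above, makes it per-instance state with no cross-thread window.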

Propchange: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1588992-1595999

Modified: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java?rev=1596000&r1=1595999&r2=1596000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java Mon May 19 19:29:30 2014
@@ -111,6 +111,30 @@ public class TestKeyShell {
     assertFalse(outContent.toString(), outContent.toString().contains("key1"));
   }
   
+  /* HADOOP-10586 KeyShell didn't allow --description. */
+  @Test
+  public void testKeySuccessfulCreationWithDescription() throws Exception {
+    outContent.reset();
+    String[] args1 = {"create", "key1", "--provider",
+                      "jceks://file" + tmpDir + "/keystore.jceks",
+                      "--description", "someDescription"};
+    int rc = 0;
+    KeyShell ks = new KeyShell();
+    ks.setConf(new Configuration());
+    rc = ks.run(args1);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("key1 has been successfully " +
+        "created."));
+
+    outContent.reset();
+    String[] args2a = {"list", "--metadata", "--provider",
+                      "jceks://file" + tmpDir + "/keystore.jceks"};
+    rc = ks.run(args2a);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("description"));
+    assertTrue(outContent.toString().contains("someDescription"));
+  }
+
   @Test
   public void testInvalidKeySize() throws Exception {
     String[] args1 = {"create", "key1", "--size", "56", "--provider", 

Modified: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java?rev=1596000&r1=1595999&r2=1596000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java Mon May 19 19:29:30 2014
@@ -113,7 +113,23 @@ public class IdUserGroup {
           "The new entry is to be ignored for the following reason.",
           DUPLICATE_NAME_ID_DEBUG_INFO));
   }
-      
+
+  /**
+   * uid and gid are defined as uint32 in linux. Some systems create
+   * (intended or unintended) <nfsnobody, 4294967294> kind of <name,Id>
+   * mapping, where 4294967294 is 2**32-2 as unsigned int32. As an example,
+   *   https://bugzilla.redhat.com/show_bug.cgi?id=511876.
+   * Because user or group id are treated as Integer (signed integer or int32)
+   * here, the number 4294967294 is out of range. The solution is to convert
+   * uint32 to int32, so to map the out-of-range ID to the negative side of
+   * Integer, e.g. 4294967294 maps to -2 and 4294967295 maps to -1.
+   */
+  private static Integer parseId(final String idStr) {
+    Long longVal = Long.parseLong(idStr);
+    int intVal = longVal.intValue();
+    return Integer.valueOf(intVal);
+  }
+  
   /**
    * Get the whole list of users and groups and save them in the maps.
    * @throws IOException 
@@ -134,8 +150,8 @@ public class IdUserGroup {
         }
         LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]);
         // HDFS can't differentiate duplicate names with simple authentication
-        final Integer key = Integer.valueOf(nameId[1]);
-        final String value = nameId[0];        
+        final Integer key = parseId(nameId[1]);        
+        final String value = nameId[0];
         if (map.containsKey(key)) {
           final String prevValue = map.get(key);
           if (value.equals(prevValue)) {
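
The parseId() javadoc above describes plain two's-complement narrowing: parse the decimal as a long, then let the int cast map values in [2^31, 2^32) onto the negative int range. A quick check against the IDs exercised by the new test:

    // uint32 values above Integer.MAX_VALUE wrap deterministically:
    public static void main(String[] args) {
      System.out.println((int) Long.parseLong("4294967294")); // -2 (2^32 - 2, nfsnobody)
      System.out.println((int) Long.parseLong("4294967295")); // -1 (2^32 - 1)
      System.out.println((int) Long.parseLong("2147483648")); // -2147483648 (2^31)
      System.out.println((int) Long.parseLong("2147483647")); // 2147483647, unchanged
    }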

Modified: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java?rev=1596000&r1=1595999&r2=1596000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java Mon May 19 19:29:30 2014
@@ -19,11 +19,14 @@ package org.apache.hadoop.oncrpc;
 
 import java.io.IOException;
 import java.net.DatagramSocket;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
 import org.apache.hadoop.oncrpc.security.Verifier;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.portmap.PortmapMapping;
 import org.apache.hadoop.portmap.PortmapRequest;
 import org.jboss.netty.buffer.ChannelBuffer;
@@ -37,7 +40,7 @@ import org.jboss.netty.channel.SimpleCha
  * and implement {@link #handleInternal} to handle the requests received.
  */
 public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
-  private static final Log LOG = LogFactory.getLog(RpcProgram.class);
+  static final Log LOG = LogFactory.getLog(RpcProgram.class);
   public static final int RPCB_PORT = 111;
   private final String program;
   private final String host;
@@ -45,6 +48,7 @@ public abstract class RpcProgram extends
   private final int progNumber;
   private final int lowProgVersion;
   private final int highProgVersion;
+  private final boolean allowInsecurePorts;
   
   /**
    * If not null, this will be used as the socket to use to connect to the
@@ -61,10 +65,14 @@ public abstract class RpcProgram extends
    * @param progNumber program number as defined in RFC 1050
    * @param lowProgVersion lowest version of the specification supported
    * @param highProgVersion highest version of the specification supported
+   * @param registrationSocket if not null, use this socket to
+   *        register with portmap daemon
+   * @param allowInsecurePorts true to allow client connections from
+   *        unprivileged ports, false otherwise
    */
   protected RpcProgram(String program, String host, int port, int progNumber,
       int lowProgVersion, int highProgVersion,
-      DatagramSocket registrationSocket) {
+      DatagramSocket registrationSocket, boolean allowInsecurePorts) {
     this.program = program;
     this.host = host;
     this.port = port;
@@ -72,6 +80,9 @@ public abstract class RpcProgram extends
     this.lowProgVersion = lowProgVersion;
     this.highProgVersion = highProgVersion;
     this.registrationSocket = registrationSocket;
+    this.allowInsecurePorts = allowInsecurePorts;
+    LOG.info("Will " + (allowInsecurePorts ? "" : "not ") + "accept client "
+        + "connections from unprivileged ports");
   }
 
   /**
@@ -133,43 +144,82 @@ public abstract class RpcProgram extends
       throws Exception {
     RpcInfo info = (RpcInfo) e.getMessage();
     RpcCall call = (RpcCall) info.header();
+    
+    SocketAddress remoteAddress = info.remoteAddress();
+    if (!allowInsecurePorts) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Will not allow connections from unprivileged ports. " +
+            "Checking for valid client port...");
+      }
+      if (remoteAddress instanceof InetSocketAddress) {
+        InetSocketAddress inetRemoteAddress = (InetSocketAddress) remoteAddress;
+        if (inetRemoteAddress.getPort() > 1023) {
+          LOG.warn("Connection attempted from '" + inetRemoteAddress + "' "
+              + "which is an unprivileged port. Rejecting connection.");
+          sendRejectedReply(call, remoteAddress, ctx);
+          return;
+        } else {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Accepting connection from '" + remoteAddress + "'");
+          }
+        }
+      } else {
+        LOG.warn("Could not determine remote port of socket address '" +
+            remoteAddress + "'. Rejecting connection.");
+        sendRejectedReply(call, remoteAddress, ctx);
+        return;
+      }
+    }
+    
     if (LOG.isTraceEnabled()) {
       LOG.trace(program + " procedure #" + call.getProcedure());
     }
     
     if (this.progNumber != call.getProgram()) {
       LOG.warn("Invalid RPC call program " + call.getProgram());
-      RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
-          AcceptState.PROG_UNAVAIL, Verifier.VERIFIER_NONE);
-
-      XDR out = new XDR();
-      reply.write(out);
-      ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
-          .buffer());
-      RpcResponse rsp = new RpcResponse(b, info.remoteAddress());
-      RpcUtil.sendRpcResponse(ctx, rsp);
+      sendAcceptedReply(call, remoteAddress, AcceptState.PROG_UNAVAIL, ctx);
       return;
     }
 
     int ver = call.getVersion();
     if (ver < lowProgVersion || ver > highProgVersion) {
       LOG.warn("Invalid RPC call version " + ver);
-      RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
-          AcceptState.PROG_MISMATCH, Verifier.VERIFIER_NONE);
-
-      XDR out = new XDR();
-      reply.write(out);
-      out.writeInt(lowProgVersion);
-      out.writeInt(highProgVersion);
-      ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
-          .buffer());
-      RpcResponse rsp = new RpcResponse(b, info.remoteAddress());
-      RpcUtil.sendRpcResponse(ctx, rsp);
+      sendAcceptedReply(call, remoteAddress, AcceptState.PROG_MISMATCH, ctx);
       return;
     }
     
     handleInternal(ctx, info);
   }
+  
+  private void sendAcceptedReply(RpcCall call, SocketAddress remoteAddress,
+      AcceptState acceptState, ChannelHandlerContext ctx) {
+    RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
+        acceptState, Verifier.VERIFIER_NONE);
+
+    XDR out = new XDR();
+    reply.write(out);
+    if (acceptState == AcceptState.PROG_MISMATCH) {
+      out.writeInt(lowProgVersion);
+      out.writeInt(highProgVersion);
+    }
+    ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
+        .buffer());
+    RpcResponse rsp = new RpcResponse(b, remoteAddress);
+    RpcUtil.sendRpcResponse(ctx, rsp);
+  }
+  
+  private static void sendRejectedReply(RpcCall call,
+      SocketAddress remoteAddress, ChannelHandlerContext ctx) {
+    XDR out = new XDR();
+    RpcDeniedReply reply = new RpcDeniedReply(call.getXid(),
+        RpcReply.ReplyState.MSG_DENIED,
+        RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
+    reply.write(out);
+    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
+        .buffer());
+    RpcResponse rsp = new RpcResponse(buf, remoteAddress);
+    RpcUtil.sendRpcResponse(ctx, rsp);
+  }
 
   protected abstract void handleInternal(ChannelHandlerContext ctx, RpcInfo info);
   
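
The admission check added above reduces to one predicate; condensed as a sketch (the real code additionally rejects remotes whose port cannot be determined):

    // Ports 1-1023 are privileged on Unix: only root can bind them, so a low
    // source port is weak evidence the peer runs with elevated privileges.
    static boolean acceptRemotePort(boolean allowInsecurePorts, int remotePort) {
      return allowInsecurePorts || remotePort <= 1023;
    }

Note that rejected calls get an RPC-level denial (MSG_DENIED with AUTH_ERROR via sendRejectedReply) rather than a silently dropped connection, so clients fail fast.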

Modified: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java?rev=1596000&r1=1595999&r2=1596000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java Mon May 19 19:29:30 2014
@@ -67,6 +67,51 @@ public class TestIdUserGroup {
   }
 
   @Test
+  public void testIdOutOfIntegerRange() throws IOException {
+    String GET_ALL_USERS_CMD = "echo \""
+        + "nfsnobody:x:4294967294:4294967294:Anonymous NFS 
User:/var/lib/nfs:/sbin/nologin\n"
+        + "nfsnobody1:x:4294967295:4294967295:Anonymous NFS 
User:/var/lib/nfs1:/sbin/nologin\n"
+        + "maxint:x:2147483647:2147483647:Grid Distributed File 
System:/home/maxint:/bin/bash\n"
+        + "minint:x:2147483648:2147483648:Grid Distributed File 
System:/home/minint:/bin/bash\n"
+        + "archivebackup:*:1031:4294967294:Archive 
Backup:/home/users/archivebackup:/bin/sh\n"
+        + "hdfs:x:11501:10787:Grid Distributed File 
System:/home/hdfs:/bin/bash\n"
+        + "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
+        + " | cut -d: -f1,3";
+    String GET_ALL_GROUPS_CMD = "echo \""
+        + "hdfs:*:11501:hrt_hdfs\n"
+        + "rpcuser:*:29:\n"
+        + "nfsnobody:*:4294967294:\n"
+        + "nfsnobody1:*:4294967295:\n"
+        + "maxint:*:2147483647:\n"
+        + "minint:*:2147483648:\n"
+        + "mapred3:x:498\"" 
+        + " | cut -d: -f1,3";
+    // Maps for id to name map
+    BiMap<Integer, String> uMap = HashBiMap.create();
+    BiMap<Integer, String> gMap = HashBiMap.create();
+
+    IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":");
+    assertTrue(uMap.size() == 7);
+    assertEquals("nfsnobody", uMap.get(-2));
+    assertEquals("nfsnobody1", uMap.get(-1));
+    assertEquals("maxint", uMap.get(2147483647));
+    assertEquals("minint", uMap.get(-2147483648));
+    assertEquals("archivebackup", uMap.get(1031));
+    assertEquals("hdfs",uMap.get(11501));
+    assertEquals("daemon", uMap.get(2));
+
+    IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":");
+    assertTrue(gMap.size() == 7);
+    assertEquals("hdfs",gMap.get(11501));
+    assertEquals("rpcuser", gMap.get(29));
+    assertEquals("nfsnobody", gMap.get(-2));
+    assertEquals("nfsnobody1", gMap.get(-1));
+    assertEquals("maxint", gMap.get(2147483647));
+    assertEquals("minint", gMap.get(-2147483648));
+    assertEquals("mapred3", gMap.get(498));
+  }
+
+  @Test
   public void testUserUpdateSetting() throws IOException {
     IdUserGroup iug = new IdUserGroup();
     assertEquals(iug.getTimeout(), IdUserGroup.TIMEOUT_DEFAULT);

Modified: hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java?rev=1596000&r1=1595999&r2=1596000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java Mon May 19 19:29:30 2014
@@ -28,6 +28,8 @@ import java.util.Random;
 import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
 import org.apache.hadoop.oncrpc.security.CredentialsNone;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
+import org.apache.log4j.Level;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -38,10 +40,16 @@ import org.junit.Test;
 import org.mockito.Mockito;
 
 public class TestFrameDecoder {
+  
+  static {
+    ((Log4JLogger) RpcProgram.LOG).getLogger().setLevel(Level.ALL);
+  }
 
   private static int resultSize;
 
   static void testRequest(XDR request, int serverPort) {
+    // Reset resultSize so as to avoid interference from other tests in this class.
+    resultSize = 0;
     SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", serverPort, request,
         true);
     tcpClient.run();
@@ -50,9 +58,10 @@ public class TestFrameDecoder {
   static class TestRpcProgram extends RpcProgram {
 
     protected TestRpcProgram(String program, String host, int port,
-        int progNumber, int lowProgVersion, int highProgVersion) {
+        int progNumber, int lowProgVersion, int highProgVersion,
+        boolean allowInsecurePorts) {
       super(program, host, port, progNumber, lowProgVersion, highProgVersion,
-          null);
+          null, allowInsecurePorts);
     }
 
     @Override
@@ -149,7 +158,41 @@ public class TestFrameDecoder {
 
   @Test
   public void testFrames() {
+    int serverPort = startRpcServer(true);
 
+    XDR xdrOut = createGetportMount();
+    int headerSize = xdrOut.size();
+    int bufsize = 2 * 1024 * 1024;
+    byte[] buffer = new byte[bufsize];
+    xdrOut.writeFixedOpaque(buffer);
+    int requestSize = xdrOut.size() - headerSize;
+
+    // Send the request to the server
+    testRequest(xdrOut, serverPort);
+
+    // Verify the server got the request with right size
+    assertEquals(requestSize, resultSize);
+  }
+  
+  @Test
+  public void testUnprivilegedPort() {
+    // Don't allow connections from unprivileged ports. Given that this test is
+    // presumably not being run by root, this will be the case.
+    int serverPort = startRpcServer(false);
+
+    XDR xdrOut = createGetportMount();
+    int bufsize = 2 * 1024 * 1024;
+    byte[] buffer = new byte[bufsize];
+    xdrOut.writeFixedOpaque(buffer);
+
+    // Send the request to the server
+    testRequest(xdrOut, serverPort);
+
+    // Verify the server rejected the request.
+    assertEquals(0, resultSize);
+  }
+  
+  private static int startRpcServer(boolean allowInsecurePorts) {
     Random rand = new Random();
     int serverPort = 30000 + rand.nextInt(10000);
     int retries = 10;    // A few retries in case initial choice is in use.
@@ -157,7 +200,7 @@ public class TestFrameDecoder {
     while (true) {
       try {
         RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
-            "localhost", serverPort, 100000, 1, 2);
+            "localhost", serverPort, 100000, 1, 2, allowInsecurePorts);
         SimpleTcpServer tcpServer = new SimpleTcpServer(serverPort, program, 1);
         tcpServer.run();
         break;          // Successfully bound a port, break out.
@@ -169,19 +212,7 @@ public class TestFrameDecoder {
         }
       }
     }
-
-    XDR xdrOut = createGetportMount();
-    int headerSize = xdrOut.size();
-    int bufsize = 2 * 1024 * 1024;
-    byte[] buffer = new byte[bufsize];
-    xdrOut.writeFixedOpaque(buffer);
-    int requestSize = xdrOut.size() - headerSize;
-
-    // Send the request to the server
-    testRequest(xdrOut, serverPort);
-
-    // Verify the server got the request with right size
-    assertEquals(requestSize, resultSize);
+    return serverPort;
   }
 
   static void createPortmapXDRheader(XDR xdr_out, int procedure) {

