Revert "HDFS-9686. Remove useless boxing/unboxing code. Contributed by Kousuke 
Saruta."

This reverts commit fe124da5ffc16e4795c3dd5542accd58361e1b08.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e565ce7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e565ce7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e565ce7

Branch: refs/heads/yarn-2877
Commit: 5e565ce7a7e194f82e935b0e9808744e6bd64287
Parents: e9a6226
Author: Akira Ajisaka <aajis...@apache.org>
Authored: Wed Feb 10 23:43:33 2016 +0900
Committer: Akira Ajisaka <aajis...@apache.org>
Committed: Wed Feb 10 23:43:33 2016 +0900

----------------------------------------------------------------------
 .../apache/hadoop/lib/servlet/ServerWebApp.java | 23 +++----
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 -
 .../hdfs/server/namenode/FSEditLogOp.java       | 41 +++++-------
 .../top/window/RollingWindowManager.java        |  8 +--
 .../hdfs/ErasureCodeBenchmarkThroughput.java    | 17 +++--
 .../shortcircuit/TestShortCircuitLocalRead.java | 66 ++++++++------------
 .../mapreduce/v2/app/TestStagingCleanup.java    | 57 -----------------
 7 files changed, 60 insertions(+), 155 deletions(-)
----------------------------------------------------------------------
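
For context: the reverted HDFS-9686 patch had replaced boxed wrapper calls
(Short.valueOf, Integer.valueOf, Boolean.valueOf) with their
primitive-returning equivalents (Short.parseShort, Integer.parseInt,
Boolean.parseBoolean); this revert restores the boxed forms. A minimal
sketch of the distinction (illustrative only, not part of the patch):

    // Illustrative only -- not part of this commit.
    // parseShort returns a primitive short; valueOf returns a boxed
    // java.lang.Short that is auto-unboxed when assigned to a short.
    short a = Short.parseShort("42");   // primitive result, no boxing
    short b = Short.valueOf("42");      // boxed, then auto-unboxed

    // Likewise when formatting: Short.toString(s) avoids the temporary
    // Short object that Short.valueOf(s).toString() creates.
    String s1 = Short.toString(a);
    String s2 = Short.valueOf(a).toString();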


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e565ce7/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
index 5e855de..c745f45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
@@ -36,9 +36,7 @@ import java.text.MessageFormat;
  * and uses its lifecycle to start and stop the server.
  */
 @InterfaceAudience.Private
-public abstract class ServerWebApp
-       extends Server
-       implements ServletContextListener {
+public abstract class ServerWebApp extends Server implements ServletContextListener {
 
   private static final String HOME_DIR = ".home.dir";
   private static final String CONFIG_DIR = ".config.dir";
@@ -63,8 +61,8 @@ public abstract class ServerWebApp
   /**
    * Constructor for testing purposes.
    */
-  protected ServerWebApp(String name, String homeDir, String configDir,
-                         String logDir, String tempDir, Configuration config) {
+  protected ServerWebApp(String name, String homeDir, String configDir, String logDir, String tempDir,
+                         Configuration config) {
     super(name, homeDir, configDir, logDir, tempDir, config);
   }
 
@@ -122,8 +120,7 @@ public abstract class ServerWebApp
       String sysProp = name + HOME_DIR;
       homeDir = System.getProperty(sysProp);
       if (homeDir == null) {
-        throw new IllegalArgumentException(MessageFormat.format(
-            "System property [{0}] not defined", sysProp));
+        throw new IllegalArgumentException(MessageFormat.format("System property [{0}] not defined", sysProp));
       }
     }
     return homeDir;
@@ -163,8 +160,7 @@ public abstract class ServerWebApp
   }
 
   /**
-   * Resolves the host and port InetSocketAddress the
-   * web server is listening to.
+   * Resolves the host and port InetSocketAddress the web server is listening to.
    * <p>
    * This implementation looks for the following 2 properties:
    * <ul>
@@ -172,10 +168,8 @@ public abstract class ServerWebApp
    *   <li>#SERVER_NAME#.http.port</li>
    * </ul>
    *
-   * @return the host and port InetSocketAddress the
-   *         web server is listening to.
-   * @throws ServerException thrown
-   *         if any of the above 2 properties is not defined.
+   * @return the host and port InetSocketAddress the web server is listening to.
+   * @throws ServerException thrown if any of the above 2 properties is not defined.
    */
   protected InetSocketAddress resolveAuthority() throws ServerException {
     String hostnameKey = getName() + HTTP_HOSTNAME;
@@ -239,7 +233,6 @@ public abstract class ServerWebApp
    *
    */
   public boolean isSslEnabled() {
-    return Boolean.parseBoolean(
-      System.getProperty(getName() + SSL_ENABLED, "false"));
+    return Boolean.valueOf(System.getProperty(getName() + SSL_ENABLED, "false"));
   }
 }
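
A note on the isSslEnabled() hunk above (illustrative, not from the patch):
Boolean.parseBoolean(String) and Boolean.valueOf(String) treat input
identically -- each yields true only for the string "true", compared
case-insensitively, and neither throws on other input -- so the revert
swaps a primitive-returning call for a boxed one without changing behavior:

    // Illustrative only -- both forms are case-insensitive, never throw,
    // and any input other than "true" simply yields false.
    boolean a = Boolean.parseBoolean("TRUE");     // true, primitive result
    boolean b = Boolean.valueOf("TRUE");          // true, auto-unboxed
    boolean c = Boolean.valueOf("not-a-flag");    // false, not an error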

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e565ce7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4b8bdcb..2901fd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1962,9 +1962,6 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9726. Refactor IBR code to a new class.  (szetszwo)
 
-    HDFS-9686. Remove useless boxing/unboxing code.
-    (Kousuke Saruta via aajisaka)
-
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e565ce7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index a8389f0..93d1b33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -414,9 +414,7 @@ public abstract class FSEditLogOp {
   }
 
   @SuppressWarnings("unchecked")
-  static abstract class AddCloseOp
-         extends FSEditLogOp
-          implements BlockListUpdatingOp {
+  static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatingOp {
     int length;
     long inodeId;
     String path;
@@ -637,8 +635,7 @@ public abstract class FSEditLogOp {
             NameNodeLayoutVersion.Feature.BLOCK_STORAGE_POLICY, logVersion)) {
           this.storagePolicyId = FSImageSerialization.readByte(in);
         } else {
-          this.storagePolicyId =
-              HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+          this.storagePolicyId = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
         }
         // read clientId and callId
         readRpcIds(in, logVersion);
@@ -718,7 +715,7 @@ public abstract class FSEditLogOp {
           Long.toString(inodeId));
       XMLUtils.addSaxString(contentHandler, "PATH", path);
       XMLUtils.addSaxString(contentHandler, "REPLICATION",
-          Short.toString(replication));
+          Short.valueOf(replication).toString());
       XMLUtils.addSaxString(contentHandler, "MTIME",
           Long.toString(mtime));
       XMLUtils.addSaxString(contentHandler, "ATIME",
@@ -746,7 +743,7 @@ public abstract class FSEditLogOp {
       this.length = Integer.parseInt(st.getValue("LENGTH"));
       this.inodeId = Long.parseLong(st.getValue("INODEID"));
       this.path = st.getValue("PATH");
-      this.replication = Short.parseShort(st.getValue("REPLICATION"));
+      this.replication = Short.valueOf(st.getValue("REPLICATION"));
       this.mtime = Long.parseLong(st.getValue("MTIME"));
       this.atime = Long.parseLong(st.getValue("ATIME"));
       this.blockSize = Long.parseLong(st.getValue("BLOCKSIZE"));
@@ -1187,12 +1184,12 @@ public abstract class FSEditLogOp {
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "PATH", path);
       XMLUtils.addSaxString(contentHandler, "REPLICATION",
-          Short.toString(replication));
+          Short.valueOf(replication).toString());
     }
     
     @Override void fromXml(Stanza st) throws InvalidXmlException {
       this.path = st.getValue("PATH");
-      this.replication = Short.parseShort(st.getValue("REPLICATION"));
+      this.replication = Short.valueOf(st.getValue("REPLICATION"));
     }
   }
 
@@ -1980,13 +1977,13 @@ public abstract class FSEditLogOp {
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "SRC", src);
       XMLUtils.addSaxString(contentHandler, "MODE",
-          Short.toString(permissions.toShort()));
+          Short.valueOf(permissions.toShort()).toString());
     }
     
     @Override void fromXml(Stanza st) throws InvalidXmlException {
       this.src = st.getValue("SRC");
       this.permissions = new FsPermission(
-          Short.parseShort(st.getValue("MODE")));
+          Short.valueOf(st.getValue("MODE")));
     }
   }
 
@@ -4470,13 +4467,13 @@ public abstract class FSEditLogOp {
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "PATH", path);
       XMLUtils.addSaxString(contentHandler, "POLICYID",
-          Byte.toString(policyId));
+          Byte.valueOf(policyId).toString());
     }
 
     @Override
     void fromXml(Stanza st) throws InvalidXmlException {
       this.path = st.getValue("PATH");
-      this.policyId = Byte.parseByte(st.getValue("POLICYID"));
+      this.policyId = Byte.valueOf(st.getValue("POLICYID"));
     }
   }  
 
@@ -4953,8 +4950,7 @@ public abstract class FSEditLogOp {
 
   public static void delegationTokenToXml(ContentHandler contentHandler,
       DelegationTokenIdentifier token) throws SAXException {
-    contentHandler.startElement(
-        "", "", "DELEGATION_TOKEN_IDENTIFIER", new AttributesImpl());
+    contentHandler.startElement("", "", "DELEGATION_TOKEN_IDENTIFIER", new 
AttributesImpl());
     XMLUtils.addSaxString(contentHandler, "KIND", token.getKind().toString());
     XMLUtils.addSaxString(contentHandler, "SEQUENCE_NUMBER",
         Integer.toString(token.getSequenceNumber()));
@@ -5000,8 +4996,7 @@ public abstract class FSEditLogOp {
 
   public static void delegationKeyToXml(ContentHandler contentHandler,
       DelegationKey key) throws SAXException {
-    contentHandler.startElement(
-        "", "", "DELEGATION_KEY", new AttributesImpl());
+    contentHandler.startElement("", "", "DELEGATION_KEY", new 
AttributesImpl());
     XMLUtils.addSaxString(contentHandler, "KEY_ID",
         Integer.toString(key.getKeyId()));
     XMLUtils.addSaxString(contentHandler, "EXPIRY_DATE",
@@ -5029,8 +5024,7 @@ public abstract class FSEditLogOp {
 
   public static void permissionStatusToXml(ContentHandler contentHandler,
       PermissionStatus perm) throws SAXException {
-    contentHandler.startElement(
-        "", "", "PERMISSION_STATUS", new AttributesImpl());
+    contentHandler.startElement("", "", "PERMISSION_STATUS", new 
AttributesImpl());
     XMLUtils.addSaxString(contentHandler, "USERNAME", perm.getUserName());
     XMLUtils.addSaxString(contentHandler, "GROUPNAME", perm.getGroupName());
     fsPermissionToXml(contentHandler, perm.getPermission());
@@ -5048,13 +5042,13 @@ public abstract class FSEditLogOp {
 
   public static void fsPermissionToXml(ContentHandler contentHandler,
       FsPermission mode) throws SAXException {
-    XMLUtils.addSaxString(contentHandler, "MODE",
-        Short.toString(mode.toShort()));
+    XMLUtils.addSaxString(contentHandler, "MODE", Short.valueOf(mode.toShort())
+        .toString());
   }
 
   public static FsPermission fsPermissionFromXml(Stanza st)
       throws InvalidXmlException {
-    short mode = Short.parseShort(st.getValue("MODE"));
+    short mode = Short.valueOf(st.getValue("MODE"));
     return new FsPermission(mode);
   }
 
@@ -5063,8 +5057,7 @@ public abstract class FSEditLogOp {
     XMLUtils.addSaxString(contentHandler, "PERM", v.SYMBOL);
   }
 
-  private static FsAction fsActionFromXml(Stanza st)
-      throws InvalidXmlException {
+  private static FsAction fsActionFromXml(Stanza st) throws InvalidXmlException {
     FsAction v = FSACTION_SYMBOL_MAP.get(st.getValue("PERM"));
     if (v == null)
       throw new InvalidXmlException("Invalid value for FsAction");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e565ce7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
index 2834ebb..63438ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
@@ -173,8 +173,7 @@ public class RollingWindowManager {
    * @param user the user that updated the metric
    * @param delta the amount of change in the metric, e.g., +1
    */
-  public void recordMetric(long time, String command,
-      String user, long delta) {
+  public void recordMetric(long time, String command, String user, long delta) {
     RollingWindow window = getRollingWindow(command, user);
     window.incAt(time, delta);
   }
@@ -209,7 +208,7 @@ public class RollingWindowManager {
       }
       for (int i = 0; i < size; i++) {
         NameValuePair userEntry = reverse.pop();
-        User user = new User(userEntry.name, userEntry.value);
+        User user = new User(userEntry.name, Long.valueOf(userEntry.value));
         op.addUser(user);
       }
     }
@@ -244,8 +243,7 @@ public class RollingWindowManager {
           metricName, userName, windowSum);
       topN.offer(new NameValuePair(userName, windowSum));
     }
-    LOG.debug("topN users size for command {} is: {}",
-        metricName, topN.size());
+    LOG.debug("topN users size for command {} is: {}", metricName, topN.size());
     return topN;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e565ce7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java
index 13dc997..da4b321 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java
@@ -69,8 +69,7 @@ import java.util.concurrent.TimeUnit;
  * For example, if the user wants to test reading 1024MB data with 10 clients,
  * he/she should firstly generate 1024MB data with 10 (or more) clients.
  */
-public class ErasureCodeBenchmarkThroughput
-       extends Configured implements Tool {
+public class ErasureCodeBenchmarkThroughput extends Configured implements Tool {
 
   private static final int BUFFER_SIZE_MB = 128;
   private static final String DFS_TMP_DIR = System.getProperty(
@@ -115,15 +114,13 @@ public class ErasureCodeBenchmarkThroughput
       System.out.println(msg);
     }
     System.err.println("Usage: ErasureCodeBenchmarkThroughput " +
-        "<read|write|gen|clean> <size in MB> " +
-        "<ec|rep> [num clients] [stf|pos]\n" +
-        "Stateful and positional option is only available for read.");
+        "<read|write|gen|clean> <size in MB> <ec|rep> [num clients] 
[stf|pos]\n"
+        + "Stateful and positional option is only available for read.");
     System.exit(1);
   }
 
-  private List<Long> doBenchmark(boolean isRead, int dataSizeMB,
-      int numClients, boolean isEc, boolean statefulRead, boolean isGen)
-      throws Exception {
+  private List<Long> doBenchmark(boolean isRead, int dataSizeMB, int 
numClients,
+      boolean isEc, boolean statefulRead, boolean isGen) throws Exception {
     CompletionService<Long> cs = new ExecutorCompletionService<Long>(
         Executors.newFixedThreadPool(numClients));
     for (int i = 0; i < numClients; i++) {
@@ -220,7 +217,7 @@ public class ErasureCodeBenchmarkThroughput
         printUsage("Unknown operation: " + args[0]);
       }
       try {
-        dataSizeMB = Integer.parseInt(args[1]);
+        dataSizeMB = Integer.valueOf(args[1]);
         if (dataSizeMB <= 0) {
           printUsage("Invalid data size: " + dataSizeMB);
         }
@@ -236,7 +233,7 @@ public class ErasureCodeBenchmarkThroughput
     }
     if (args.length >= 4 && type != OpType.CLEAN) {
       try {
-        numClients = Integer.parseInt(args[3]);
+        numClients = Integer.valueOf(args[3]);
         if (numClients <= 0) {
           printUsage("Invalid num of clients: " + numClients);
         }
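
On the two Integer hunks above (illustrative note, not from the patch):
Integer.valueOf(String) boxes the parsed value and relies on auto-unboxing
at the int assignment, while Integer.parseInt returns the primitive
directly; both throw NumberFormatException on malformed input, so the
surrounding try/catch behaves the same either way:

    // Illustrative only -- identical parsing and identical failure mode;
    // valueOf merely adds a boxing step before auto-unboxing into int.
    int viaParse = Integer.parseInt("1024");   // primitive int
    int viaBox   = Integer.valueOf("1024");    // Integer, auto-unboxed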

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e565ce7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index a069003..e6ee7f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -72,9 +72,9 @@ import org.junit.Test;
 /**
  * Test for short circuit read functionality using {@link BlockReaderLocal}.
  * When a block is being read by a client is on the local datanode, instead of
- * using {@link DataTransferProtocol} and connect to datanode,
- * the short circuit read allows reading the file directly
- * from the files on the local file system.
+ * using {@link DataTransferProtocol} and connect to datanode, the short circuit
+ * read allows reading the file directly from the files on the local file
+ * system.
  */
 public class TestShortCircuitLocalRead {
   private static TemporarySocketDirectory sockDir;
@@ -195,8 +195,7 @@ public class TestShortCircuitLocalRead {
 
     HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);
 
-    ByteBuffer actual =
-        ByteBuffer.allocateDirect(expected.length - readOffset);
+    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
 
     IOUtils.skipFully(stm, readOffset);
 
@@ -231,8 +230,7 @@ public class TestShortCircuitLocalRead {
 
   public void doTestShortCircuitReadLegacy(boolean ignoreChecksum, int size,
       int readOffset, String shortCircuitUser, String readingUser,
-      boolean legacyShortCircuitFails)
-      throws IOException, InterruptedException {
+      boolean legacyShortCircuitFails) throws IOException, InterruptedException {
     doTestShortCircuitReadImpl(ignoreChecksum, size, readOffset,
         shortCircuitUser, readingUser, legacyShortCircuitFails);
   }
@@ -249,8 +247,7 @@ public class TestShortCircuitLocalRead {
    */
   public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size,
       int readOffset, String shortCircuitUser, String readingUser,
-      boolean legacyShortCircuitFails)
-      throws IOException, InterruptedException {
+      boolean legacyShortCircuitFails) throws IOException, InterruptedException {
     Configuration conf = new Configuration();
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
@@ -265,8 +262,7 @@ public class TestShortCircuitLocalRead {
     if (shortCircuitUser != null) {
       conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
           shortCircuitUser);
-      conf.setBoolean(
-          HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+      conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
     }
     if (simulatedStorage) {
       SimulatedFSDataset.setFactory(conf);
@@ -328,8 +324,7 @@ public class TestShortCircuitLocalRead {
    */
   @Test(timeout=60000)
   public void testLocalReadFallback() throws Exception {
-    doTestShortCircuitReadLegacy(
-        true, 13, 0, getCurrentUser(), "notallowed", true);
+    doTestShortCircuitReadLegacy(true, 13, 0, getCurrentUser(), "notallowed", true);
   }
 
   @Test(timeout=60000)
@@ -371,9 +366,8 @@ public class TestShortCircuitLocalRead {
       ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
       Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
       final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
-      ClientDatanodeProtocol proxy =
-          DFSUtilClient.createClientDatanodeProtocolProxy(
-              dnInfo, conf, 60000, false);
+      ClientDatanodeProtocol proxy = 
+          DFSUtilClient.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
       try {
         proxy.getBlockLocalPathInfo(blk, token);
         Assert.fail("The call should have failed as this user "
@@ -393,8 +387,7 @@ public class TestShortCircuitLocalRead {
     int size = blockSize;
     Configuration conf = new Configuration();
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
-    conf.setBoolean(
-        HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
+    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
     conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
         new File(sockDir.getDir(),
             "testSkipWithVerifyChecksum._PORT.sock").getAbsolutePath());
@@ -441,8 +434,7 @@ public class TestShortCircuitLocalRead {
     MiniDFSCluster cluster = null;
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
-    conf.setBoolean(
-        HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
+    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
     conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
         new File(sockDir.getDir(),
             "testHandleTruncatedBlockFile._PORT.sock").getAbsolutePath());
@@ -531,8 +523,8 @@ public class TestShortCircuitLocalRead {
       System.out.println("Usage: test shortcircuit checksum threadCount");
       System.exit(1);
     }
-    boolean shortcircuit = Boolean.parseBoolean(args[0]);
-    boolean checksum = Boolean.parseBoolean(args[1]);
+    boolean shortcircuit = Boolean.valueOf(args[0]);
+    boolean checksum = Boolean.valueOf(args[1]);
     int threadCount = Integer.parseInt(args[2]);
 
     // Setup create a file
@@ -543,8 +535,7 @@ public class TestShortCircuitLocalRead {
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
         checksum);
 
-    // Override fileSize and DATA_TO_WRITE to
-    // much larger values for benchmark test
+    // Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
     int fileSize = 1000 * blockSize + 100; // File with 1000 blocks
     final byte [] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize);
 
@@ -566,8 +557,7 @@ public class TestShortCircuitLocalRead {
           for (int i = 0; i < iteration; i++) {
             try {
               String user = getCurrentUser();
-              checkFileContent(
-                  fs.getUri(), file1, dataToWrite, 0, user, conf, true);
+              checkFileContent(fs.getUri(), file1, dataToWrite, 0, user, conf, true);
             } catch (IOException e) {
               e.printStackTrace();
             } catch (InterruptedException e) {
@@ -600,13 +590,11 @@ public class TestShortCircuitLocalRead {
    * through RemoteBlockReader
    * @throws IOException
   */
-  public void doTestShortCircuitReadWithRemoteBlockReader(
-      boolean ignoreChecksum, int size, String shortCircuitUser,
-      int readOffset, boolean shortCircuitFails)
-      throws IOException, InterruptedException {
+  public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum,
+      int size, String shortCircuitUser, int readOffset,
+      boolean shortCircuitFails) throws IOException, InterruptedException {
     Configuration conf = new Configuration();
-    conf.setBoolean(
-        HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
@@ -615,8 +603,7 @@ public class TestShortCircuitLocalRead {
     // check that / exists
     Path path = new Path("/");
     URI uri = cluster.getURI();
-    assertTrue(
-        "/ should be a directory", fs.getFileStatus(path).isDirectory());
+    assertTrue("/ should be a directory", 
fs.getFileStatus(path).isDirectory());
 
     byte[] fileData = AppendTestUtil.randomBytes(seed, size);
     Path file1 = new Path("filelocal.dat");
@@ -628,12 +615,10 @@ public class TestShortCircuitLocalRead {
       checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
           conf, shortCircuitFails);
       //RemoteBlockReader have unsupported method read(ByteBuffer bf)
-      assertTrue(
-          "RemoteBlockReader unsupported method read(ByteBuffer bf) error",
-          checkUnsupportedMethod(fs, file1, fileData, readOffset));
+      assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) 
error",
+                    checkUnsupportedMethod(fs, file1, fileData, readOffset));
     } catch(IOException e) {
-      throw new IOException(
-          "doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
+      throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex 
error ", e);
     } catch(InterruptedException inEx) {
       throw inEx;
     } finally {
@@ -645,8 +630,7 @@ public class TestShortCircuitLocalRead {
   private boolean checkUnsupportedMethod(FileSystem fs, Path file,
       byte[] expected, int readOffset) throws IOException {
     HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
-    ByteBuffer actual =
-        ByteBuffer.allocateDirect(expected.length - readOffset);
+    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
     IOUtils.skipFully(stm, readOffset);
     try {
       stm.read(actual);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e565ce7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
index e22a12e..fc64996 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
@@ -245,63 +245,6 @@ import org.junit.Test;
      verify(fs).delete(stagingJobPath, true);
    }
 
-  @Test
-  public void testByPreserveFailedStaging() throws IOException {
-    conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
-    // Failed task's staging files should be kept
-    conf.setBoolean(MRJobConfig.PRESERVE_FAILED_TASK_FILES, true);
-    fs = mock(FileSystem.class);
-    when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
-    //Staging Dir exists
-    String user = UserGroupInformation.getCurrentUser().getShortUserName();
-    Path stagingDir = MRApps.getStagingAreaDir(conf, user);
-    when(fs.exists(stagingDir)).thenReturn(true);
-    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
-            0);
-    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
-    JobId jobid = recordFactory.newRecordInstance(JobId.class);
-    jobid.setAppId(appId);
-    ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
-    Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
-    MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc,
-            JobStateInternal.FAILED, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
-    appMaster.init(conf);
-    appMaster.start();
-    appMaster.shutDownJob();
-    //test whether notifyIsLastAMRetry called
-    Assert.assertEquals(true, ((TestMRApp)appMaster).getTestIsLastAMRetry());
-    verify(fs, times(0)).delete(stagingJobPath, true);
-  }
-
-  @Test
-  public void testPreservePatternMatchedStaging() throws IOException {
-    conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
-    // The staging files that are matched to the pattern
-    // should not be deleted
-    conf.set(MRJobConfig.PRESERVE_FILES_PATTERN, "JobDir");
-    fs = mock(FileSystem.class);
-    when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
-    //Staging Dir exists
-    String user = UserGroupInformation.getCurrentUser().getShortUserName();
-    Path stagingDir = MRApps.getStagingAreaDir(conf, user);
-    when(fs.exists(stagingDir)).thenReturn(true);
-    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
-            0);
-    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
-    JobId jobid = recordFactory.newRecordInstance(JobId.class);
-    jobid.setAppId(appId);
-    ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
-    Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
-    MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc,
-            JobStateInternal.RUNNING, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
-    appMaster.init(conf);
-    appMaster.start();
-    appMaster.shutDownJob();
-    //test whether notifyIsLastAMRetry called
-    Assert.assertEquals(true, ((TestMRApp)appMaster).getTestIsLastAMRetry());
-    verify(fs, times(0)).delete(stagingJobPath, true);
-  }
-
    private class TestMRApp extends MRAppMaster {
      ContainerAllocator allocator;
      boolean testIsLastAMRetry = false;
