svn commit: r1412077 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java src/test/java/org/apache/hadoop/fs/TestDe

2012-11-21 Thread tomwhite
Author: tomwhite
Date: Wed Nov 21 12:29:37 2012
New Revision: 1412077

URL: http://svn.apache.org/viewvc?rev=1412077&view=rev
Log:
HADOOP-9049. DelegationTokenRenewer needs to be Singleton and FileSystems 
should register/deregister to/from. Contributed by Karthik Kambatla.

Added:

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java
   (with props)
Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1412077&r1=1412076&r2=1412077&view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Wed Nov 
21 12:29:37 2012
@@ -444,6 +444,9 @@ Release 2.0.3-alpha - Unreleased 
 
 HADOOP-6607. Add different variants of non caching HTTP headers. (tucu)
 
+HADOOP-9049. DelegationTokenRenewer needs to be Singleton and FileSystems
+should register/deregister to/from. (Karthik Kambatla via tomwhite)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java?rev=1412077&r1=1412076&r2=1412077&view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
 Wed Nov 21 12:29:37 2012
@@ -33,7 +33,7 @@ import org.apache.hadoop.util.Time;
  * A daemon thread that waits for the next file system to renew.
  */
 @InterfaceAudience.Private
-public class DelegationTokenRenewer<T extends FileSystem & DelegationTokenRenewer.Renewable>
+public class DelegationTokenRenewer
 extends Thread {
   /** The renewable interface used by the renewer. */
   public interface Renewable {
@@ -93,7 +93,7 @@ public class DelegationTokenRenewer<T ex
  * @param newTime the new time
  */
 private void updateRenewalTime() {
-  renewalTime = RENEW_CYCLE + Time.now();
+  renewalTime = renewCycle + Time.now();
 }
 
 /**
@@ -134,34 +134,69 @@ public class DelegationTokenRenewer<T ex
   }
 
   /** Wait for 95% of a day between renewals */
-  private static final int RENEW_CYCLE = 24 * 60 * 60 * 950;
+  private static final int RENEW_CYCLE = 24 * 60 * 60 * 950; 
 
-  private DelayQueue<RenewAction<T>> queue = new DelayQueue<RenewAction<T>>();
+  @InterfaceAudience.Private
+  protected static int renewCycle = RENEW_CYCLE;
 
-  public DelegationTokenRenewer(final Class<T> clazz) {
+  /** Queue to maintain the RenewActions to be processed by the {@link #run()} */
+  private volatile DelayQueue<RenewAction<?>> queue = new DelayQueue<RenewAction<?>>();
+  
+  /**
+   * Create the singleton instance. However, the thread can be started lazily 
in
+   * {@link #addRenewAction(FileSystem)}
+   */
+  private static DelegationTokenRenewer INSTANCE = null;
+
+  private DelegationTokenRenewer(final Class<? extends FileSystem> clazz) {
 super(clazz.getSimpleName() + "-" + DelegationTokenRenewer.class.getSimpleName());
 setDaemon(true);
   }
 
+  public static synchronized DelegationTokenRenewer getInstance() {
+if (INSTANCE == null) {
+  INSTANCE = new DelegationTokenRenewer(FileSystem.class);
+}
+return INSTANCE;
+  }
+
   /** Add a renew action to the queue. */
-  public void addRenewAction(final T fs) {
+  public synchronized <T extends FileSystem & Renewable> void addRenewAction(final T fs) {
 queue.add(new RenewAction<T>(fs));
+if (!isAlive()) {
+  start();
+}
   }
 
+  /** Remove the associated renew action from the queue */
+  public synchronized <T extends FileSystem & Renewable> void removeRenewAction(
+  final T fs) {
+for (RenewAction<?> action : queue) {
+  if (action.weakFs.get() == fs) {
+queue.remove(action);
+return;
+  }
+}
+  }
+
+  @SuppressWarnings("static-access")
   @Override
   public void run() {
 for(;;) {
-  RenewAction<T> action = null;
+  RenewAction<?> action = null;
   try {
-action = queue.take();
-if (action.renew()) {
-  action.updateRenewalTime();
-  queue.add(action);
+synchronized (this) {
+  action = queue.take();
+  if (action.renew()) {
+   
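
Usage sketch (not from this commit): with HADOOP-9049 a token-carrying FileSystem registers with the shared singleton rather than owning its own renewer thread, and deregisters when it is closed. The sketch relies only on the methods visible in the diff above -- getInstance(), addRenewAction() and removeRenewAction(); the class name RenewingFileSystem is hypothetical, and the methods of the Renewable interface are elided because they are not shown here. Intended callers are token-aware file systems such as HFTP/WebHDFS.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegationTokenRenewer;
import org.apache.hadoop.fs.FileSystem;

// Hypothetical subclass, for illustration of the register/deregister pattern only.
abstract class RenewingFileSystem extends FileSystem
    implements DelegationTokenRenewer.Renewable {

  @Override
  public void initialize(URI name, Configuration conf) throws IOException {
    super.initialize(name, conf);
    // Register with the shared renewer; per the diff, the daemon thread is
    // started lazily on the first addRenewAction() call.
    DelegationTokenRenewer.getInstance().addRenewAction(this);
  }

  @Override
  public void close() throws IOException {
    // Deregister so the singleton's queue stops renewing tokens for a closed fs.
    DelegationTokenRenewer.getInstance().removeRenewAction(this);
    super.close();
  }
}

The singleton keeps the JVM to at most one renewer daemon no matter how many FileSystem instances exist, and removeRenewAction() keeps the queue from renewing tokens for file systems that have already been closed.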

svn commit: r1412079 - in /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java src/test/java/org/apache/hado

2012-11-21 Thread tomwhite
Author: tomwhite
Date: Wed Nov 21 12:37:42 2012
New Revision: 1412079

URL: http://svn.apache.org/viewvc?rev=1412079&view=rev
Log:
Merge -r 1412076:1412077 from trunk to branch-2. Fixes: HADOOP-9049. 
DelegationTokenRenewer needs to be Singleton and FileSystems should 
register/deregister to/from. Contributed by Karthik Kambatla.

Added:

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java
  - copied unchanged from r1412077, 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java
Modified:

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1412079&r1=1412078&r2=1412079&view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
Wed Nov 21 12:37:42 2012
@@ -152,6 +152,9 @@ Release 2.0.3-alpha - Unreleased 
 
 HADOOP-6607. Add different variants of non caching HTTP headers. (tucu)
 
+HADOOP-9049. DelegationTokenRenewer needs to be Singleton and FileSystems
+should register/deregister to/from. (Karthik Kambatla via tomwhite)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java?rev=1412079&r1=1412078&r2=1412079&view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
 (original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
 Wed Nov 21 12:37:42 2012
@@ -33,7 +33,7 @@ import org.apache.hadoop.util.Time;
  * A daemon thread that waits for the next file system to renew.
  */
 @InterfaceAudience.Private
-public class DelegationTokenRenewer<T extends FileSystem & DelegationTokenRenewer.Renewable>
+public class DelegationTokenRenewer
 extends Thread {
   /** The renewable interface used by the renewer. */
   public interface Renewable {
@@ -93,7 +93,7 @@ public class DelegationTokenRenewer<T ex
  * @param newTime the new time
  */
 private void updateRenewalTime() {
-  renewalTime = RENEW_CYCLE + Time.now();
+  renewalTime = renewCycle + Time.now();
 }
 
 /**
@@ -134,34 +134,69 @@ public class DelegationTokenRenewer<T ex
   }
 
   /** Wait for 95% of a day between renewals */
-  private static final int RENEW_CYCLE = 24 * 60 * 60 * 950;
+  private static final int RENEW_CYCLE = 24 * 60 * 60 * 950; 
 
-  private DelayQueue<RenewAction<T>> queue = new DelayQueue<RenewAction<T>>();
+  @InterfaceAudience.Private
+  protected static int renewCycle = RENEW_CYCLE;
 
-  public DelegationTokenRenewer(final Class<T> clazz) {
+  /** Queue to maintain the RenewActions to be processed by the {@link #run()} */
+  private volatile DelayQueue<RenewAction<?>> queue = new DelayQueue<RenewAction<?>>();
+  
+  /**
+   * Create the singleton instance. However, the thread can be started lazily 
in
+   * {@link #addRenewAction(FileSystem)}
+   */
+  private static DelegationTokenRenewer INSTANCE = null;
+
+  private DelegationTokenRenewer(final Class<? extends FileSystem> clazz) {
 super(clazz.getSimpleName() + "-" + DelegationTokenRenewer.class.getSimpleName());
 setDaemon(true);
   }
 
+  public static synchronized DelegationTokenRenewer getInstance() {
+if (INSTANCE == null) {
+  INSTANCE = new DelegationTokenRenewer(FileSystem.class);
+}
+return INSTANCE;
+  }
+
   /** Add a renew action to the queue. */
-  public void addRenewAction(final T fs) {
+  public synchronized <T extends FileSystem & Renewable> void addRenewAction(final T fs) {
 queue.add(new RenewAction<T>(fs));
+if (!isAlive()) {
+  start();
+}
   }
 
+  /** Remove the associated renew action from the queue */
+  public synchronized <T extends FileSystem & Renewable> void removeRenewAction(
+  final T fs) {
+for (RenewAction<?> action : queue) {
+  if (action.weakFs.get() == fs) {
+queue.remove(action);
+return;
+  }
+}
+  }
+
+  @SuppressWarnings("static-access")
   @Override
   public 

svn commit: r1412218 - in /hadoop/common/branches/branch-1: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.ja

2012-11-21 Thread suresh
Author: suresh
Date: Wed Nov 21 18:10:19 2012
New Revision: 1412218

URL: http://svn.apache.org/viewvc?rev=1412218&view=rev
Log:
HDFS-4208. NameNode could be stuck in SafeMode due to never-created blocks. 
Contributed by Brandon Li.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1412218&r1=1412217&r2=1412218&view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Wed Nov 21 18:10:19 2012
@@ -375,6 +375,9 @@ Release 1.1.1 - Unreleased
 HADOOP-8745. Incorrect version numbers in hadoop-core POM.
 (Matthias Friedrich via eli)
 
+HDFS-4208. NameNode could be stuck in SafeMode due to never-created
+blocks. (Brandon Li via suresh)
+
 Release 1.1.0 - 2012.09.28
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1412218&r1=1412217&r2=1412218&view=diff
==
--- 
hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 (original)
+++ 
hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 Wed Nov 21 18:10:19 2012
@@ -5339,7 +5339,7 @@ public class FSNamesystem implements FSC
   void setBlockTotal() {
 if (safeMode == null)
   return;
-safeMode.setBlockTotal(blocksMap.size());
+safeMode.setBlockTotal((int)getSafeBlockCount());
   }
 
   /**
@@ -5350,6 +5350,50 @@ public class FSNamesystem implements FSC
   }
 
   /**
+   * There are times when blocks are allocated by a client but are never
+   * written to. This could happen because the response to the block allocation
+   * request never made it to the client, or the client failed right after block
+   * allocation. In such a case, the NameNode might get stuck in safemode waiting
+   * for such blocks to be reported. To handle this, such blocks should not be
+   * counted toward the total blocks needed to exit safemode.
+   * <br>
+   * This method returns the total number of blocks excluding the last blocks of
+   * files under construction with length zero.
+   */
+  long getSafeBlockCount() {
+// Calculate number of blocks excluded by SafeMode
+long numExculdedBlocks = 0;
+for (Lease lease : leaseManager.getSortedLeases()) {
+  for (String path : lease.getPaths()) {
+INode node = dir.getFileINode(path);
+if (node == null) {
+  LOG.error("Found a lease for nonexisting file: " + path);
+  continue;
+}
+if (!node.isUnderConstruction()) {
+  LOG.error("Found a lease for file that is not under construction:"
+  + path);
+  continue;
+}
+INodeFileUnderConstruction cons = (INodeFileUnderConstruction) node;
+BlockInfo[] blocks = cons.getBlocks();
+if (blocks == null) {
+  continue;
+}
+// Exclude the last block of a file under construction with zero length
+if (blocks[blocks.length - 1].getNumBytes() == 0) {
+  numExculdedBlocks++;
+}
+  }
+}
+LOG.info("Number of blocks excluded by SafeMode: " + numExculdedBlocks
++ " total blocks: " + getBlocksTotal() + " and thus the safe blocks: "
++ (getBlocksTotal() - numExculdedBlocks));
+
+return getBlocksTotal() - numExculdedBlocks;
+  }
+  
+  /**
* Enter safe mode manually.
* @throws IOException
*/

Modified: 
hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1412218&r1=1412217&r2=1412218&view=diff
==
--- 
hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
 (original)
+++ 
hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
 Wed Nov 21 18:10:19 2012
@@ -15,12 +15,17 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
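
Worked example (not from the commit): how the exclusion in getSafeBlockCount() changes the safe-mode exit target. The 0.95f threshold stands in for the default dfs.safemode.threshold.pct, and the numbers and class name are invented for illustration.

// Illustration of the arithmetic behind getSafeBlockCount(): blocks that were
// allocated but never written will never be reported, so counting them can
// leave the NameNode waiting in safe mode forever.
public class SafeBlockCountExample {
  public static void main(String[] args) {
    long totalBlocks = 40;            // what blocksMap.size() reports
    long zeroLengthLastBlocks = 3;    // allocated by clients but never written
    long reportable = totalBlocks - zeroLengthLastBlocks;       // 37

    float thresholdPct = 0.95f;       // assumed dfs.safemode.threshold.pct
    long neededBefore = (long) (totalBlocks * thresholdPct);    // 38 > 37: stuck
    long neededAfter  = (long) (reportable * thresholdPct);     // 35 <= 37: can exit

    System.out.println("Needed before the fix: " + neededBefore
        + ", needed after the fix: " + neededAfter
        + ", blocks that can ever be reported: " + reportable);
  }
}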

svn commit: r1412220 - in /hadoop/common/branches/branch-1.1: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.

2012-11-21 Thread suresh
Author: suresh
Date: Wed Nov 21 18:18:31 2012
New Revision: 1412220

URL: http://svn.apache.org/viewvc?rev=1412220&view=rev
Log:
HDFS-4208. Merging 1412218 from branch-1 to branch-1.1

Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt

hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1412220&r1=1412219&r2=1412220&view=diff
==
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Wed Nov 21 18:18:31 2012
@@ -69,6 +69,9 @@ Release 1.1.1 - 2012.11.18
 HADOOP-8745. Incorrect version numbers in hadoop-core POM.
 (Matthias Friedrich via eli)
 
+HDFS-4208. NameNode could be stuck in SafeMode due to never-created
+blocks. (Brandon Li via suresh)
+
 Release 1.1.0 - 2012.09.28
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1412220&r1=1412219&r2=1412220&view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 Wed Nov 21 18:18:31 2012
@@ -5266,7 +5266,7 @@ public class FSNamesystem implements FSC
   void setBlockTotal() {
 if (safeMode == null)
   return;
-safeMode.setBlockTotal(blocksMap.size());
+safeMode.setBlockTotal((int)getSafeBlockCount());
   }
 
   /**
@@ -5277,6 +5277,50 @@ public class FSNamesystem implements FSC
   }
 
   /**
+   * There are times when blocks are allocated by a client but are never
+   * written to. This could happen because the response to the block allocation
+   * request never made it to the client, or the client failed right after block
+   * allocation. In such a case, the NameNode might get stuck in safemode waiting
+   * for such blocks to be reported. To handle this, such blocks should not be
+   * counted toward the total blocks needed to exit safemode.
+   * <br>
+   * This method returns the total number of blocks excluding the last blocks of
+   * files under construction with length zero.
+   */
+  long getSafeBlockCount() {
+// Calculate number of blocks excluded by SafeMode
+long numExculdedBlocks = 0;
+for (Lease lease : leaseManager.getSortedLeases()) {
+  for (String path : lease.getPaths()) {
+INode node = dir.getFileINode(path);
+if (node == null) {
+  LOG.error("Found a lease for nonexisting file: " + path);
+  continue;
+}
+if (!node.isUnderConstruction()) {
+  LOG.error("Found a lease for file that is not under construction:"
+  + path);
+  continue;
+}
+INodeFileUnderConstruction cons = (INodeFileUnderConstruction) node;
+BlockInfo[] blocks = cons.getBlocks();
+if (blocks == null) {
+  continue;
+}
+// Exclude the last block of a file under construction with zero length
+if (blocks[blocks.length - 1].getNumBytes() == 0) {
+  numExculdedBlocks++;
+}
+  }
+}
+LOG.info("Number of blocks excluded by SafeMode: " + numExculdedBlocks
++ " total blocks: " + getBlocksTotal() + " and thus the safe blocks: "
++ (getBlocksTotal() - numExculdedBlocks));
+
+return getBlocksTotal() - numExculdedBlocks;
+  }
+  
+  /**
* Enter safe mode manually.
* @throws IOException
*/

Modified: 
hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1412220&r1=1412219&r2=1412220&view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
 Wed Nov 21 18:18:31 2012
@@ -15,12 +15,17 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import 

svn commit: r1412229 - in /hadoop/common/branches/branch-1: ./ src/core/org/apache/hadoop/fs/ src/hdfs/org/apache/hadoop/hdfs/tools/ src/test/org/apache/hadoop/fs/ src/test/org/apache/hadoop/hdfs/

2012-11-21 Thread suresh
Author: suresh
Date: Wed Nov 21 18:59:43 2012
New Revision: 1412229

URL: http://svn.apache.org/viewvc?rev=1412229&view=rev
Log:
HDFS-4207. All hadoop fs operations fail if the default fs is down even if a 
different fs is specified in the command. Contributed by Jing Zhao.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt
hadoop/common/branches/branch-1/src/core/org/apache/hadoop/fs/FsShell.java

hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java

hadoop/common/branches/branch-1/src/test/org/apache/hadoop/fs/TestFsShellReturnCode.java

hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestDFSShell.java

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1412229&r1=1412228&r2=1412229&view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Wed Nov 21 18:59:43 2012
@@ -306,6 +306,9 @@ Release 1.2.0 - unreleased
 HADOOP-9036. Fix racy test case TestSinkQueue (Backport HADOOP-7292).
 (Luke Lu backport by suresh)
 
+HDFS-4207. All hadoop fs operations fail if the default fs is down even if 
+a different fs is specified in the command. (Jing Zhao via suresh)
+
 Release 1.1.1 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/branches/branch-1/src/core/org/apache/hadoop/fs/FsShell.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/core/org/apache/hadoop/fs/FsShell.java?rev=1412229&r1=1412228&r2=1412229&view=diff
==
--- hadoop/common/branches/branch-1/src/core/org/apache/hadoop/fs/FsShell.java 
(original)
+++ hadoop/common/branches/branch-1/src/core/org/apache/hadoop/fs/FsShell.java 
Wed Nov 21 18:59:43 2012
@@ -22,10 +22,11 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
-import java.text.DecimalFormat;
-import java.text.NumberFormat;
 import java.text.SimpleDateFormat;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.TimeZone;
 import java.util.zip.GZIPInputStream;
 
 import org.apache.hadoop.conf.Configuration;
@@ -41,14 +42,14 @@ import org.apache.hadoop.io.WritableComp
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.util.StringUtils;
 
 /** Provide command line access to a FileSystem. */
 public class FsShell extends Configured implements Tool {
 
-  protected FileSystem fs;
+  private FileSystem fs;
   private Trash trash;
   public static final SimpleDateFormat dateForm = 
new SimpleDateFormat("yyyy-MM-dd HH:mm");
@@ -78,15 +79,22 @@ public class FsShell extends Configured 
   
   protected void init() throws IOException {
 getConf().setQuietMode(true);
-if (this.fs == null) {
- this.fs = FileSystem.get(getConf());
+  }
+  
+  protected FileSystem getFS() throws IOException {
+if (fs == null) {
+  fs = FileSystem.get(getConf());
 }
-if (this.trash == null) {
-  this.trash = new Trash(getConf());
+return fs;
+  }
+  
+  protected Trash getTrash() throws IOException {
+if (trash == null) {
+  trash = new Trash(getConf());
 }
+return trash;
   }
 
-  
   /**
* Copies from stdin to the indicated file.
*/
@@ -360,7 +368,9 @@ public class FsShell extends Configured 
 DataOutputBuffer outbuf;
 
 public TextRecordInputStream(FileStatus f) throws IOException {
-  r = new SequenceFile.Reader(fs, f.getPath(), getConf());
+  FileSystem pFS = f == null ? getFS() : f.getPath().getFileSystem(
+  getConf());
+  r = new SequenceFile.Reader(pFS, f.getPath(), getConf());
   key = 
ReflectionUtils.newInstance(r.getKeyClass().asSubclass(WritableComparable.class),
 getConf());
   val = 
ReflectionUtils.newInstance(r.getValueClass().asSubclass(Writable.class),
@@ -468,11 +478,12 @@ public class FsShell extends Configured 
   System.out.flush();
 
   boolean printWarning = false;
-  FileStatus status = fs.getFileStatus(f);
+  FileSystem pFS = f.getFileSystem(getConf());
+  FileStatus status = pFS.getFileStatus(f);
   long len = status.getLen();
 
   for(boolean done = false; !done; ) {
-BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
+BlockLocation[] locations = pFS.getFileBlockLocations(status, 0, len);
 int i = 0;
for(; i < locations.length && locations[i].getHosts().length == rep; i++)
@@ -973,9 +984,10 @@ public class 
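
Sketch (not from this commit): the pattern the FsShell change moves toward -- resolve the FileSystem from the Path being operated on (or lazily via getFS()/getTrash()) instead of eagerly creating the default one in init(), so a dead default file system no longer breaks commands aimed at a different one. The class and method names below are invented for illustration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustration only: ask the Path for its own FileSystem, so e.g.
// "hadoop fs -ls hdfs://othernn:8020/dir" can work even when fs.default.name
// points at a NameNode that is down.
class PerPathListing {
  static void list(String uri, Configuration conf) throws IOException {
    Path p = new Path(uri);
    FileSystem srcFs = p.getFileSystem(conf);  // contacts only the fs named in the path
    for (FileStatus stat : srcFs.listStatus(p)) {
      System.out.println(stat.getPath());
    }
  }
}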

svn commit: r1412294 - in /hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/util/StringUtils.java

2012-11-21 Thread tgraves
Author: tgraves
Date: Wed Nov 21 21:03:01 2012
New Revision: 1412294

URL: http://svn.apache.org/viewvc?rev=1412294&view=rev
Log:
HADOOP-8931. Add Java version to startup message. (eli)

Modified:

hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java

Modified: 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1412294&r1=1412293&r2=1412294&view=diff
==
--- 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt
 (original)
+++ 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt
 Wed Nov 21 21:03:01 2012
@@ -8,6 +8,8 @@ Release 0.23.6 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-8931. Add Java version to startup message. (eli)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java?rev=1412294&r1=1412293&r2=1412294&view=diff
==
--- 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
 (original)
+++ 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
 Wed Nov 21 21:03:01 2012
@@ -604,7 +604,8 @@ public class StringUtils {
   "  build = " + VersionInfo.getUrl() + " -r "
  + VersionInfo.getRevision()
  + "; compiled by '" + VersionInfo.getUser()
- + "' on " + VersionInfo.getDate()}
+ + "' on " + VersionInfo.getDate(),
+  "  java = " + System.getProperty("java.version") }
 )
   );
 




svn commit: r1412297 - in /hadoop/common/branches/HDFS-2802: ./ hadoop-project/pom.xml pom.xml

2012-11-21 Thread szetszwo
Author: szetszwo
Date: Wed Nov 21 21:08:45 2012
New Revision: 1412297

URL: http://svn.apache.org/viewvc?rev=1412297&view=rev
Log:
Merge r1410998 through r1412282 from trunk.

Modified:
hadoop/common/branches/HDFS-2802/   (props changed)
hadoop/common/branches/HDFS-2802/hadoop-project/pom.xml
hadoop/common/branches/HDFS-2802/pom.xml

Propchange: hadoop/common/branches/HDFS-2802/
--
  Merged /hadoop/common/trunk:r1410998-1412282

Modified: hadoop/common/branches/HDFS-2802/hadoop-project/pom.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-project/pom.xml?rev=1412297&r1=1412296&r2=1412297&view=diff
==
--- hadoop/common/branches/HDFS-2802/hadoop-project/pom.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-project/pom.xml Wed Nov 21 21:08:45 
2012
@@ -703,11 +703,6 @@
   <groupId>org.apache.maven.plugins</groupId>
   <artifactId>maven-jar-plugin</artifactId>
   <version>2.3.1</version>
-  <configuration>
-    <excludes>
-      <exclude>mrapp-generated-classpath</exclude>
-    </excludes>
-  </configuration>
 </plugin>
 <plugin>
   <groupId>org.apache.maven.plugins</groupId>
@@ -803,21 +798,6 @@
 </executions>
   </plugin>
   <plugin>
-    <artifactId>maven-dependency-plugin</artifactId>
-    <executions>
-      <execution>
-        <id>build-classpath</id>
-        <phase>generate-sources</phase>
-        <goals>
-          <goal>build-classpath</goal>
-        </goals>
-        <configuration>
-          <outputFile>target/classes/mrapp-generated-classpath</outputFile>
-        </configuration>
-      </execution>
-    </executions>
-  </plugin>
-  <plugin>
 <groupId>org.apache.maven.plugins</groupId>
 <artifactId>maven-surefire-plugin</artifactId>
 <configuration>

Modified: hadoop/common/branches/HDFS-2802/pom.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/pom.xml?rev=1412297&r1=1412296&r2=1412297&view=diff
==
--- hadoop/common/branches/HDFS-2802/pom.xml (original)
+++ hadoop/common/branches/HDFS-2802/pom.xml Wed Nov 21 21:08:45 2012
@@ -517,7 +517,7 @@ xsi:schemaLocation="http://maven.apache.
 <groupId>com.atlassian.maven.plugins</groupId>
 <artifactId>maven-clover2-plugin</artifactId>
 <configuration>
-  <includesAllSourceRoots>true</includesAllSourceRoots>
+  <includesAllSourceRoots>false</includesAllSourceRoots>
   <includesTestSourceRoots>true</includesTestSourceRoots>
   <licenseLocation>${cloverLicenseLocation}</licenseLocation>
   <cloverDatabase>${cloverDatabase}</cloverDatabase>