svn commit: r1389783 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: ./ src/main/java/org/apache/hadoop/fs/ src/main/java/org/apache/hadoop/security/ src/main/resources/ src/test/java/o

2012-09-25 Thread harsh
Author: harsh
Date: Tue Sep 25 10:17:11 2012
New Revision: 1389783

URL: http://svn.apache.org/viewvc?rev=1389783&view=rev
Log:
HADOOP-7930. Kerberos relogin interval in UserGroupInformation should be 
configurable. Contributed by Robert Kanter. (harsh)

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1389783r1=1389782r2=1389783view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Tue Sep 
25 10:17:11 2012
@@ -108,6 +108,9 @@ Trunk (Unreleased)
 NullPointerException if the serializations list is empty.
 (Sho Shimauchi via harsh)
 
+HADOOP-7930. Kerberos relogin interval in UserGroupInformation
+should be configurable (Robert Kanter via harsh)
+
   BUG FIXES
 
 HADOOP-8177. MBeans shouldn't try to register when it fails to create 
MBeanName.

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java?rev=1389783r1=1389782r2=1389783view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 Tue Sep 25 10:17:11 2012
@@ -242,5 +242,11 @@ public class CommonConfigurationKeysPubl
   public static final String HADOOP_SSL_ENABLED_KEY = hadoop.ssl.enabled;
   public static final boolean HADOOP_SSL_ENABLED_DEFAULT = false;
 
+  /** See a href={@docRoot}/../core-default.htmlcore-default.xml/a */
+  public static final String HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN =
+  hadoop.kerberos.min.seconds.before.relogin;
+  /** Default value for HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN */
+  public static final int HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT =
+  60;
 }
 

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java?rev=1389783r1=1389782r2=1389783view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 Tue Sep 25 10:17:11 2012
@@ -18,6 +18,8 @@
 package org.apache.hadoop.security;
 
 import static 
org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
 
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
@@ -192,13 +194,12 @@ public class UserGroupInformation {
   private static boolean useKerberos;
   /** Server-side groups fetching service */
   private static Groups groups;
+  /** Min time (in seconds) before relogin for Kerberos */
+  private static long kerberosMinSecondsBeforeRelogin;
   /** The configuration to use */
   private static Configuration conf;
 
   
-  /** Leave 10 minutes between relogin attempts. */
-  private static final long MIN_TIME_BEFORE_RELOGIN = 10 * 60 * 1000L;
-  
   /**Environment variable pointing to the token cache file*/
   public static final String HADOOP_TOKEN_FILE_LOCATION = 
 HADOOP_TOKEN_FILE_LOCATION;
@@ -245,6 +246,16 @@ public class UserGroupInformation {
  HADOOP_SECURITY_AUTHENTICATION + 
   of  + value);
 }
+try {
+   

svn commit: r1389799 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java src/test/java/org/apache/hadoop/fs/TestLocalDi

2012-09-25 Thread harsh
Author: harsh
Date: Tue Sep 25 11:10:11 2012
New Revision: 1389799

URL: http://svn.apache.org/viewvc?rev=1389799&view=rev
Log:
HADOOP-8436. NPE In getLocalPathForWrite ( path, conf ) when the required 
context item is not configured. Contributed by Brahma Reddy Battula. (harsh)

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1389799r1=1389798r2=1389799view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Tue Sep 
25 11:10:11 2012
@@ -234,6 +234,10 @@ Trunk (Unreleased)
 HADOOP-8815. RandomDatum needs to override hashCode().
 (Brandon Li via suresh)
 
+HADOOP-8436. NPE In getLocalPathForWrite ( path, conf ) when the
+required context item is not configured
+(Brahma Reddy Battula via harsh)
+
   OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java?rev=1389799r1=1389798r2=1389799view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
 Tue Sep 25 11:10:11 2012
@@ -265,6 +265,9 @@ public class LocalDirAllocator {
 private synchronized void confChanged(Configuration conf) 
 throws IOException {
   String newLocalDirs = conf.get(contextCfgItemName);
+  if (null == newLocalDirs) {
+throw new IOException(contextCfgItemName +  not configured);
+  }
   if (!newLocalDirs.equals(savedLocalDirs)) {
 localDirs = StringUtils.getTrimmedStrings(newLocalDirs);
 localFS = FileSystem.getLocal(conf);

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java?rev=1389799r1=1389798r2=1389799view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
 Tue Sep 25 11:10:11 2012
@@ -293,6 +293,23 @@ public class TestLocalDirAllocator {
 }
   }
 
+  /*
+   * Test when mapred.local.dir not configured and called
+   * getLocalPathForWrite
+   */
+  @Test
+  public void testShouldNotthrowNPE() throws Exception {
+Configuration conf1 = new Configuration();
+try {
+  dirAllocator.getLocalPathForWrite(/test, conf1);
+  fail(Exception not thrown when  + CONTEXT +  is not set);
+} catch (IOException e) {
+  assertEquals(CONTEXT +  not configured, e.getMessage());
+} catch (NullPointerException e) {
+  fail(Lack of configuration should not have thrown an NPE.);
+}
+  }
+
   /** Test no side effect files are left over. After creating a temp
* temp file, remove both the temp file and its parent. Verify that
* no files or directories are left over as can happen when File objects




svn commit: r1389875 - /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

2012-09-25 Thread bobby
Author: bobby
Date: Tue Sep 25 14:22:17 2012
New Revision: 1389875

URL: http://svn.apache.org/viewvc?rev=1389875&view=rev
Log:
HADOOP-8838. Colorize the test-patch output sent to JIRA (Harsh J via bobby)

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1389875r1=1389874r2=1389875view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Tue Sep 
25 14:22:17 2012
@@ -111,6 +111,9 @@ Trunk (Unreleased)
 HADOOP-7930. Kerberos relogin interval in UserGroupInformation
 should be configurable (Robert Kanter via harsh)
 
+HADOOP-8838. Colorize the test-patch output sent to JIRA (Harsh J via
+bobby)
+
   BUG FIXES
 
 HADOOP-8177. MBeans shouldn't try to register when it fails to create 
MBeanName.




svn commit: r1389888 - in /hadoop/common/branches/branch-1: CHANGES.txt src/mapred/org/apache/hadoop/mapred/JobHistory.java

2012-09-25 Thread tgraves
Author: tgraves
Date: Tue Sep 25 14:38:17 2012
New Revision: 1389888

URL: http://svn.apache.org/viewvc?rev=1389888&view=rev
Log:
MAPREDUCE-4662.  JobHistoryFilesManager thread pool never expands (Kihwal Lee 
via tgraves)

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/JobHistory.java

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1389888r1=1389887r2=1389888view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 14:38:17 2012
@@ -254,6 +254,9 @@ Release 1.2.0 - unreleased
 MAPREDUCE-4652. ValueAggregatorJob sets the wrong job jar.
 (Ahmed Radwan via tomwhite)
 
+MAPREDUCE-4662.  JobHistoryFilesManager thread pool never expands 
+(Kihwal Lee via tgraves)
+
 Release 1.1.0 - unreleased
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/JobHistory.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/JobHistory.java?rev=1389888r1=1389887r2=1389888view=diff
==
--- 
hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/JobHistory.java
 (original)
+++ 
hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/JobHistory.java
 Tue Sep 25 14:38:17 2012
@@ -221,8 +221,11 @@ public class JobHistory {
 
 
 void start() {
-  executor = new ThreadPoolExecutor(1, 3, 1, 
+  executor = new ThreadPoolExecutor(5, 5, 1, 
   TimeUnit.HOURS, new LinkedBlockingQueueRunnable());
+  // make core threads terminate if there has been no work
+  // for the keepalive period.
+  executor.allowCoreThreadTimeOut(true);
 }
 
 private FilesHolder getFileHolder(JobID id) {




svn commit: r1389909 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/who.xml publish/who.html publish/who.pdf

2012-09-25 Thread bobby
Author: bobby
Date: Tue Sep 25 15:13:45 2012
New Revision: 1389909

URL: http://svn.apache.org/viewvc?rev=1389909&view=rev
Log:
Adding myself as PMC member.

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
hadoop/common/site/main/publish/who.html
hadoop/common/site/main/publish/who.pdf

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1389909r1=1389908r2=1389909view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Tue 
Sep 25 15:13:45 2012
@@ -71,6 +71,14 @@
 td-8/td
   /tr
 
+   tr
+ tdbobby/td
+ tdRobert(Bobby) Evans/td
+ tdYahoo!/td
+ td/td
+ td-6/td
+   /tr
+
   tr
 tdcdouglas/td
 tda href=http://people.apache.org/~cdouglas;Chris Douglas/a/td

Modified: hadoop/common/site/main/publish/who.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/who.html?rev=1389909r1=1389908r2=1389909view=diff
==
--- hadoop/common/site/main/publish/who.html (original)
+++ hadoop/common/site/main/publish/who.html Tue Sep 25 15:13:45 2012
@@ -315,6 +315,17 @@ document.write(Last Published:  + docu
   
 /tr
 
+   
+tr
+ 
+td colspan=1 rowspan=1bobby/td
+ td colspan=1 rowspan=1Robert(Bobby) Evans/td
+ td colspan=1 rowspan=1Yahoo!/td
+ td colspan=1 rowspan=1/td
+ td colspan=1 rowspan=1-6/td
+   
+/tr
+
   
 tr
 
@@ -629,7 +640,7 @@ document.write(Last Published:  + docu
 /div
 
 
-a name=N103AB/aa name=Emeritus+Hadoop+PMC+Members/a
+a name=N103C6/aa name=Emeritus+Hadoop+PMC+Members/a
 h2 class=h3Emeritus Hadoop PMC Members/h2
 div class=section
 ul
@@ -644,7 +655,7 @@ document.write(Last Published:  + docu
 /div
 

-a name=N103BE/aa name=Hadoop+Committers/a
+a name=N103D9/aa name=Hadoop+Committers/a
 h2 class=h3Hadoop Committers/h2
 div class=section
 pHadoop's active committers include:/p
@@ -1259,7 +1270,7 @@ document.write(Last Published:  + docu
 /div
 

-a name=N109F5/aa name=Emeritus+Hadoop+Committers/a
+a name=N10A10/aa name=Emeritus+Hadoop+Committers/a
 h2 class=h3Emeritus Hadoop Committers/h2
 div class=section
 pHadoop committers who are no longer active include:/p

Modified: hadoop/common/site/main/publish/who.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/who.pdf?rev=1389909r1=1389908r2=1389909view=diff
==
Binary files - no diff available.




[Hadoop Wiki] Update of PoweredBy by WouterDeBie

2012-09-25 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The PoweredBy page has been changed by WouterDeBie:
http://wiki.apache.org/hadoop/PoweredBy?action=diff&rev1=404&rev2=405

  
   * ''[[http://www.spotify.com|Spotify]] ''
* ''We use Hadoop for content generation, data aggregation, reporting and 
analysis ''
-   * ''60 node cluster (1440 cores, 1TB RAM, 1.2 PB storage)''
+   * ''120 node cluster (2880 cores, 2TB RAM, 2.4 PB storage)''
  
   * ''[[http://stampedehost.com/|Stampede Data Solutions (Stampedehost.com)]] 
''
* ''Hosted Hadoop data warehouse solution provider ''


svn commit: r1389958 - /hadoop/common/branches/branch-1/CHANGES.txt

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 16:11:46 2012
New Revision: 1389958

URL: http://svn.apache.org/viewvc?rev=1389958&view=rev
Log:
MAPREDUCE-3289. Merging to branch-1.1.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1389958r1=1389957r2=1389958view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 16:11:46 2012
@@ -76,9 +76,6 @@ Release 1.2.0 - unreleased
 
 HDFS-3697. Enable fadvise readahead by default. (todd via eli)
 
-MAPREDUCE-3289. Make use of fadvise in the NM's shuffle handler.
-(Todd Lipcon and Brandon Li via sseth)
-
 MAPREDUCE-4511. Add IFile readahead (ahmed via tucu)
 
 MAPREDUCE-4565. Backport MR-2855 to branch-1: ResourceBundle lookup during
@@ -407,6 +404,9 @@ Release 1.1.0 - unreleased
 HDFS-3814. Make the replication monitor multipliers configurable in 1.x.
 (Jing Zhao via suresh)
 
+MAPREDUCE-3289. Make use of fadvise in the NM's shuffle handler.
+(Todd Lipcon and Brandon Li via sseth)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations




svn commit: r1389970 - in /hadoop/common/branches/branch-1.1: CHANGES.txt src/mapred/org/apache/hadoop/mapred/TaskTracker.java

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 16:27:41 2012
New Revision: 1389970

URL: http://svn.apache.org/viewvc?rev=1389970&view=rev
Log:
Merge -c 1368724 from branch-1 to branch-1.1 to fix MAPREDUCE-3289. Make use of 
fadvise in the NM's shuffle handler.

Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/TaskTracker.java

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1389970r1=1389969r2=1389970view=diff
==
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue Sep 25 16:27:41 2012
@@ -163,6 +163,9 @@ Release 1.1.0 - 2012.09.16
 HDFS-3617. Port HDFS-96 to branch-1 (support blocks greater than 2GB).
 (Patrick Kling and harsh via eli)
 
+MAPREDUCE-3289. Make use of fadvise in the NM's shuffle handler.
+(Todd Lipcon and Brandon Li via sseth)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations

Modified: 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/TaskTracker.java?rev=1389970r1=1389969r2=1389970view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
 Tue Sep 25 16:27:41 2012
@@ -73,7 +73,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.ReadaheadPool;
+import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
 import org.apache.hadoop.io.SecureIOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Server;
@@ -341,6 +344,9 @@ public class TaskTracker implements MRCo
 mapreduce.tasktracker.outofband.heartbeat.damper;
   static private final int DEFAULT_OOB_HEARTBEAT_DAMPER = 100;
   private volatile int oobHeartbeatDamper;
+  private boolean manageOsCacheInShuffle = false;
+  private int readaheadLength;
+  private ReadaheadPool readaheadPool = ReadaheadPool.getInstance();
   
   // Track number of completed tasks to send an out-of-band heartbeat
   private AtomicInteger finishedCount = new AtomicInteger(0);
@@ -881,6 +887,12 @@ public class TaskTracker implements MRCo
 oobHeartbeatDamper = 
   fConf.getInt(TT_OUTOFBAND_HEARTBEAT_DAMPER, 
   DEFAULT_OOB_HEARTBEAT_DAMPER);
+manageOsCacheInShuffle = fConf.getBoolean(
+  mapreduce.shuffle.manage.os.cache,
+  true);
+readaheadLength = fConf.getInt(
+  mapreduce.shuffle.readahead.bytes,
+  4 * 1024 * 1024);
   }
 
   private void startJettyBugMonitor() {
@@ -3978,16 +3990,30 @@ public class TaskTracker implements MRCo
  * send it to the reducer.
  */
 //open the map-output file
+String filePath = mapOutputFileName.toUri().getPath();
 mapOutputIn = SecureIOUtils.openForRead(
-new File(mapOutputFileName.toUri().getPath()), runAsUserName);
+new File(filePath), runAsUserName);
+//new File(mapOutputFileName.toUri().getPath()), runAsUserName);
 
+ReadaheadRequest curReadahead = null;
+
 //seek to the correct offset for the reduce
 mapOutputIn.skip(info.startOffset);
 long rem = info.partLength;
-int len =
-  mapOutputIn.read(buffer, 0, (int)Math.min(rem, MAX_BYTES_TO_READ));
-while (rem  0  len = 0) {
+long offset = info.startOffset;
+while (rem  0) {
+  if (tracker.manageOsCacheInShuffle  tracker.readaheadPool != null) 
{
+curReadahead = tracker.readaheadPool.readaheadStream(filePath,
+mapOutputIn.getFD(), offset, tracker.readaheadLength,
+info.startOffset + info.partLength, curReadahead);
+  }
+  int len = mapOutputIn.read(buffer, 0,
+  (int) Math.min(rem, MAX_BYTES_TO_READ));
+  if (len  0) {
+break;
+  }
   rem -= len;
+  offset += len;
   try {
 shuffleMetrics.outputBytes(len);
 outStream.write(buffer, 0, len);
@@ -3997,10 +4023,18 @@ public class TaskTracker implements MRCo
 throw ie;
   }
   totalRead += len;
-  len =
-mapOutputIn.read(buffer, 0, (int)Math.min(rem, MAX_BYTES_TO_READ));
 }
 
+if (curReadahead != null) {
+  

svn commit: r1389972 - /hadoop/common/branches/branch-1/CHANGES.txt

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 16:30:10 2012
New Revision: 1389972

URL: http://svn.apache.org/viewvc?rev=1389972&view=rev
Log:
MAPREDUCE-4511. Merging to branch-1.1.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1389972r1=1389971r2=1389972view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 16:30:10 2012
@@ -76,8 +76,6 @@ Release 1.2.0 - unreleased
 
 HDFS-3697. Enable fadvise readahead by default. (todd via eli)
 
-MAPREDUCE-4511. Add IFile readahead (ahmed via tucu)
-
 MAPREDUCE-4565. Backport MR-2855 to branch-1: ResourceBundle lookup during
 counter name resolution takes a lot of time. (Karthik Kambatla via sseth)
 
@@ -407,6 +405,8 @@ Release 1.1.0 - unreleased
 MAPREDUCE-3289. Make use of fadvise in the NM's shuffle handler.
 (Todd Lipcon and Brandon Li via sseth)
 
+MAPREDUCE-4511. Add IFile readahead (ahmed via tucu)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations




svn commit: r1389977 - in /hadoop/common/branches/branch-1.1: ./ src/mapred/ src/mapred/org/apache/hadoop/mapred/ src/test/org/apache/hadoop/mapred/

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 16:32:22 2012
New Revision: 1389977

URL: http://svn.apache.org/viewvc?rev=1389977&view=rev
Log:
Merge -c 1373672 from branch-1 to branch-1.1 to fix MAPREDUCE-4511. Add IFile 
readahead.

Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt
hadoop/common/branches/branch-1.1/src/mapred/mapred-default.xml

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/IFile.java

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/IFileInputStream.java

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/ReduceTask.java

hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/mapred/TestIFileStreams.java

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1389977r1=1389976r2=1389977view=diff
==
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue Sep 25 16:32:22 2012
@@ -166,6 +166,8 @@ Release 1.1.0 - 2012.09.16
 MAPREDUCE-3289. Make use of fadvise in the NM's shuffle handler.
 (Todd Lipcon and Brandon Li via sseth)
 
+MAPREDUCE-4511. Add IFile readahead (ahmed via tucu)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations

Modified: hadoop/common/branches/branch-1.1/src/mapred/mapred-default.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/mapred/mapred-default.xml?rev=1389977r1=1389976r2=1389977view=diff
==
--- hadoop/common/branches/branch-1.1/src/mapred/mapred-default.xml (original)
+++ hadoop/common/branches/branch-1.1/src/mapred/mapred-default.xml Tue Sep 25 
16:32:22 2012
@@ -972,7 +972,21 @@
 acceptable.
 /description
   /property
-  
+
+  property
+namemapreduce.ifile.readahead/name
+valuetrue/value
+descriptionConfiguration key to enable/disable IFile readahead.
+/description
+  /property
+
+  property
+namemapreduce.ifile.readahead.bytes/name
+value4194304/value
+descriptionConfiguration key to set the IFile readahead length in bytes.
+/description
+  /property
+
 !-- Job Notification Configuration --
 
 !--

Modified: 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/IFile.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/IFile.java?rev=1389977r1=1389976r2=1389977view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/IFile.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/IFile.java
 Tue Sep 25 16:32:22 2012
@@ -291,7 +291,7 @@ class IFile {
   CompressionCodec codec,
   Counters.Counter readsCounter) throws IOException {
   readRecordsCounter = readsCounter;
-  checksumIn = new IFileInputStream(in,length);
+  checksumIn = new IFileInputStream(in,length, conf);
   if (codec != null) {
 decompressor = CodecPool.getDecompressor(codec);
 this.in = codec.createInputStream(checksumIn, decompressor);

Modified: 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/IFileInputStream.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/IFileInputStream.java?rev=1389977r1=1389976r2=1389977view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/IFileInputStream.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/IFileInputStream.java
 Tue Sep 25 16:32:22 2012
@@ -19,11 +19,20 @@
 package org.apache.hadoop.mapred;
 
 import java.io.EOFException;
+import java.io.FileDescriptor;
+import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.HasFileDescriptor;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.ReadaheadPool;
+import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
 import org.apache.hadoop.util.DataChecksum;
 /**
  * A checksum input stream, used for IFiles.
@@ -32,7 +41,8 @@ import org.apache.hadoop.util.DataChecks
 
 class IFileInputStream extends InputStream {
   
-  private final InputStream in; //The input stream to be verified for 
checksum. 
+  private final InputStream in; //The input stream to be verified for checksum.
+  private final 

svn commit: r1389989 - /hadoop/common/branches/branch-1/CHANGES.txt

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:06:55 2012
New Revision: 1389989

URL: http://svn.apache.org/viewvc?rev=1389989&view=rev
Log:
MAPREDUCE-1906. Merging to branch-1.1.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1389989r1=1389988r2=1389989view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 17:06:55 2012
@@ -68,10 +68,6 @@ Release 1.2.0 - unreleased
 MAPREDUCE-782. Use PureJavaCrc32 in mapreduce spills. 
 (Todd Lipcon, backport by Brandon Li via sseth)
 
-MAPREDUCE-1906. Lower minimum heartbeat interval between tasktracker and 
-JobTracker for smaller clusters. (Todd Lipcon, backport by 
-Brandon Li via sseth)
-
 HDFS-3667.  Add retry support to WebHdfsFileSystem.  (szetszwo)
 
 HDFS-3697. Enable fadvise readahead by default. (todd via eli)
@@ -407,6 +403,10 @@ Release 1.1.0 - unreleased
 
 MAPREDUCE-4511. Add IFile readahead (ahmed via tucu)
 
+MAPREDUCE-1906. Lower minimum heartbeat interval between tasktracker and 
+JobTracker for smaller clusters. (Todd Lipcon, backport by 
+Brandon Li via sseth)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations




svn commit: r1389995 - in /hadoop/common/branches/branch-1.1: CHANGES.txt src/mapred/org/apache/hadoop/mapred/JobTracker.java src/mapred/org/apache/hadoop/mapred/MRConstants.java

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:11:58 2012
New Revision: 1389995

URL: http://svn.apache.org/viewvc?rev=1389995&view=rev
Log:
Merge -c 1366285 from branch-1 to branch-1.1 to fix MAPREDUCE-1906. Lower 
minimum heartbeat interval between tasktracker and JobTracker for smaller 
clusters.

Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/JobTracker.java

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/MRConstants.java

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1389995r1=1389994r2=1389995view=diff
==
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue Sep 25 17:11:58 2012
@@ -168,6 +168,10 @@ Release 1.1.0 - 2012.09.16
 
 MAPREDUCE-4511. Add IFile readahead (ahmed via tucu)
 
+MAPREDUCE-1906. Lower minimum heartbeat interval between tasktracker and 
+JobTracker for smaller clusters. (Todd Lipcon, backport by 
+Brandon Li via sseth)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations

Modified: 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/JobTracker.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/JobTracker.java?rev=1389995r1=1389994r2=1389995view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/JobTracker.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/JobTracker.java
 Tue Sep 25 17:11:58 2012
@@ -3462,7 +3462,7 @@ public class JobTracker implements MRCon
 int clusterSize = getClusterStatus().getTaskTrackers();
 int heartbeatInterval =  Math.max(
 (int)(1000 * HEARTBEATS_SCALING_FACTOR *
-  Math.ceil((double)clusterSize / 
+  ((double)clusterSize / 
 NUM_HEARTBEATS_IN_SECOND)),
 HEARTBEAT_INTERVAL_MIN) ;
 return heartbeatInterval;

Modified: 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/MRConstants.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/MRConstants.java?rev=1389995r1=1389994r2=1389995view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/MRConstants.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/MRConstants.java
 Tue Sep 25 17:11:58 2012
@@ -25,7 +25,7 @@ interface MRConstants {
   //
   // Timeouts, constants
   //
-  public static final int HEARTBEAT_INTERVAL_MIN = 3 * 1000;
+  public static final int HEARTBEAT_INTERVAL_MIN = 300;
   
   public static final long COUNTER_UPDATE_INTERVAL = 60 * 1000;
 




svn commit: r1389996 - /hadoop/common/branches/branch-1/CHANGES.txt

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:13:08 2012
New Revision: 1389996

URL: http://svn.apache.org/viewvc?rev=1389996&view=rev
Log:
MAPREDUCE-4558. Merging to branch-1.1.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1389996r1=1389995r2=1389996view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 17:13:08 2012
@@ -190,8 +190,6 @@ Release 1.2.0 - unreleased
 
 HDFS-3658. Fix bugs in TestDFSClientRetries and add more tests.  (szetszwo)
 
-MAPREDUCE-4558. Disable TestJobTrackerSafeMode (sseth)
-
 HADOOP-8611. Allow fall-back to the shell-based implementation when
 JNI-based users-group mapping fails (Robert Parker via bobby)
 
@@ -597,6 +595,8 @@ Release 1.1.0 - unreleased
 HDFS-3701. HDFS may miss the final block when reading a file opened for 
writing
 if one of the datanode is dead. (umamahesh and nkeywal via umamahesh)
 
+MAPREDUCE-4558. Disable TestJobTrackerSafeMode (sseth)
+
 Release 1.0.4 - Unreleased
 
   NEW FEATURES




svn commit: r1389998 - /hadoop/common/branches/branch-1/CHANGES.txt

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:19:11 2012
New Revision: 1389998

URL: http://svn.apache.org/viewvc?rev=1389998&view=rev
Log:
MAPREDUCE-3837. Merging to branch-1.1.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1389998&r1=1389997&r2=1389998&view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 17:19:11 2012
@@ -129,9 +129,6 @@ Release 1.2.0 - unreleased
 HADOOP-8249. invalid hadoop-auth cookies should trigger authentication if 
info 
 is avail before returning HTTP 401 (tucu)
 
-MAPREDUCE-3837. Job tracker is not able to recover job in case of crash
-and after that no user can submit job. (Mayank Bansal via tomwhite)
-
 HDFS-3595. Update the regular expression in TestEditLogLoading for the
 error message change by HDFS-3521.  (Colin Patrick McCabe via szetszwo)
 
@@ -595,6 +592,9 @@ Release 1.1.0 - unreleased
 HDFS-3701. HDFS may miss the final block when reading a file opened for 
writing
 if one of the datanode is dead. (umamahesh and nkeywal via umamahesh)
 
+MAPREDUCE-3837. Job tracker is not able to recover job in case of crash
+and after that no user can submit job. (Mayank Bansal via tomwhite)
+
 MAPREDUCE-4558. Disable TestJobTrackerSafeMode (sseth)
 
 Release 1.0.4 - Unreleased




svn commit: r1390006 - in /hadoop/common/branches/branch-1.1: ./ src/mapred/org/apache/hadoop/mapred/ src/test/org/apache/hadoop/mapred/

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:23:37 2012
New Revision: 1390006

URL: http://svn.apache.org/viewvc?rev=1390006&view=rev
Log:
Merge -c 1356904 from branch-1 to branch-1.1 to fix MAPREDUCE-3837. Job tracker 
is not able to recover job in case of crash and after that no user can submit 
job.

Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/JobTracker.java

hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/mapred/TestJobTrackerRestartWithLostTracker.java

hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/mapred/TestJobTrackerSafeMode.java

hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/mapred/TestRecoveryManager.java

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1390006&r1=1390005&r2=1390006&view=diff
==
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue Sep 25 17:23:37 2012
@@ -365,6 +365,9 @@ Release 1.1.0 - 2012.09.16
 MAPREDUCE-4675. Fixed a race condition caused in TestKillSubProcesses 
caused
 due to a recent commit. (Bikas Saha via vinodkv)
 
+MAPREDUCE-3837. Job tracker is not able to recover job in case of crash
+and after that no user can submit job. (Mayank Bansal via tomwhite)
+
 Release 1.0.4 - Unreleased
 
   NEW FEATURES

Modified: 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/JobTracker.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/JobTracker.java?rev=1390006&r1=1390005&r2=1390006&view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/JobTracker.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/JobTracker.java
 Tue Sep 25 17:23:37 2012
@@ -205,6 +205,7 @@ public class JobTracker implements MRCon
   State state = State.INITIALIZING;
   private static final int FS_ACCESS_RETRY_PERIOD = 1;
  static final String JOB_INFO_FILE = "job-info";
+  static final String JOB_TOKEN_FILE = "jobToken";
   private DNSToSwitchMapping dnsToSwitchMapping;
   private NetworkTopology clusterMap = new NetworkTopology();
   private int numTaskCacheLevels; // the max level to which we cache tasks
@@ -1215,179 +1216,6 @@ public class JobTracker implements MRCon
 /** A custom listener that replays the events in the order in which the 
  * events (task attempts) occurred. 
  */
-class JobRecoveryListener implements Listener {
-  // The owner job
-  private JobInProgress jip;
-  
-  private JobHistory.JobInfo job; // current job's info object
-  
-  // Maintain the count of the (attempt) events recovered
-  private int numEventsRecovered = 0;
-  
-  // Maintains open transactions
-  private MapString, String hangingAttempts = 
-new HashMapString, String();
-  
-  // Whether there are any updates for this job
-  private boolean hasUpdates = false;
-  
-  public JobRecoveryListener(JobInProgress jip) {
-this.jip = jip;
-this.job = new JobHistory.JobInfo(jip.getJobID().toString());
-  }
-
-  /**
-   * Process a task. Note that a task might commit a previously pending 
-   * transaction.
-   */
-  private void processTask(String taskId, JobHistory.Task task) {
-// Any TASK info commits the previous transaction
-boolean hasHanging = hangingAttempts.remove(taskId) != null;
-if (hasHanging) {
-  numEventsRecovered += 2;
-}
-
-TaskID id = TaskID.forName(taskId);
-TaskInProgress tip = getTip(id);
-
-updateTip(tip, task);
-  }
-
-  /**
-   * Adds a task-attempt in the listener
-   */
-  private void processTaskAttempt(String taskAttemptId, 
-  JobHistory.TaskAttempt attempt) 
-throws UnknownHostException {
-TaskAttemptID id = TaskAttemptID.forName(taskAttemptId);
-
-// Check if the transaction for this attempt can be committed
-String taskStatus = attempt.get(Keys.TASK_STATUS);
-TaskAttemptID taskID = TaskAttemptID.forName(taskAttemptId);
-JobInProgress jip = getJob(taskID.getJobID());
-JobStatus prevStatus = (JobStatus)jip.getStatus().clone();
-
-if (taskStatus.length()  0) {
-  // This means this is an update event
-  if (taskStatus.equals(Values.SUCCESS.name())) {
-// Mark this attempt as hanging
-hangingAttempts.put(id.getTaskID().toString(), taskAttemptId);
-addSuccessfulAttempt(jip, id, attempt);
-  } else {
-

svn commit: r1390009 - in /hadoop/common/branches/branch-1.1: CHANGES.txt src/test/org/apache/hadoop/mapred/TestJobTrackerSafeMode.java

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:26:18 2012
New Revision: 1390009

URL: http://svn.apache.org/viewvc?rev=1390009&view=rev
Log:
Merge -c 1374095 from branch-1 to branch-1.1 to fix MAPREDUCE-4558. Disable 
TestJobTrackerSafeMode.

Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt

hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/mapred/TestJobTrackerSafeMode.java

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1390009&r1=1390008&r2=1390009&view=diff
==
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue Sep 25 17:26:18 2012
@@ -368,6 +368,8 @@ Release 1.1.0 - 2012.09.16
 MAPREDUCE-3837. Job tracker is not able to recover job in case of crash
 and after that no user can submit job. (Mayank Bansal via tomwhite)
 
+MAPREDUCE-4558. Disable TestJobTrackerSafeMode (sseth)
+
 Release 1.0.4 - Unreleased
 
   NEW FEATURES

Modified: 
hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/mapred/TestJobTrackerSafeMode.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/mapred/TestJobTrackerSafeMode.java?rev=1390009&r1=1390008&r2=1390009&view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/mapred/TestJobTrackerSafeMode.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/mapred/TestJobTrackerSafeMode.java
 Tue Sep 25 17:26:18 2012
@@ -35,6 +35,7 @@ import org.junit.*;
  * join back.
  */
 
+@Ignore
 public class TestJobTrackerSafeMode extends TestCase {
   final Path testDir = 
new Path(System.getProperty("test.build.data", "/tmp"), "jt-safemode");
@@ -197,7 +198,7 @@ public class TestJobTrackerSafeMode exte
 
   private boolean checkTrackers(JobTracker jobtracker, SetString present, 
 SetString absent) {
-while (jobtracker.getClusterStatus(true).getActiveTrackerNames().size() != 
2) {
+while (jobtracker.getClusterStatus(true).getActiveTrackerNames().size() != 
3) {
   LOG.info("Waiting for Initialize all Task Trackers");
   UtilsForTests.waitFor(1000);
 }




svn commit: r1390011 - /hadoop/common/branches/branch-1/CHANGES.txt

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:33:41 2012
New Revision: 1390011

URL: http://svn.apache.org/viewvc?rev=1390011&view=rev
Log:
MAPREDUCE-4328. Merging to branch-1.1.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1390011&r1=1390010&r2=1390011&view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 17:33:41 2012
@@ -23,13 +23,6 @@ Release 1.2.0 - unreleased
 HADOOP-7754. Expose file descriptors from Hadoop-wrapped local 
 FileSystems (todd and ahmed via tucu)
 
-MAPREDUCE-4328. Add a JobTracker safemode to allow it to be resilient to
-NameNode failures. The safemode can be entered either automatically via
-the configurable background thread to monitor the NameNode or by the
-admin. In the safemode the JobTracker doesn't schedule new tasks, marks
-all failed tasks as KILLED for future retries and doesn't accept new job
-submissions. (acmurthy)
-
 MAPREDUCE-461. Enable service-plugins for JobTracker. (Fredrik Hedberg and
 Brandon Li via vinodkv)
 
@@ -287,6 +280,13 @@ Release 1.1.0 - unreleased
 configured timeout and are selected as the last location to read from.
 (Jing Zhao via szetszwo)
 
+MAPREDUCE-4328. Add a JobTracker safemode to allow it to be resilient to
+NameNode failures. The safemode can be entered either automatically via
+the configurable background thread to monitor the NameNode or by the
+admin. In the safemode the JobTracker doesn't schedule new tasks, marks
+all failed tasks as KILLED for future retries and doesn't accept new job
+submissions. (acmurthy)
+
   IMPROVEMENTS
 
 HADOOP-8656. Backport forced daemon shutdown of HADOOP-8353 into branch-1




svn commit: r1390016 - in /hadoop/common/branches/branch-1.1: ./ src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/ src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/ src

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:46:34 2012
New Revision: 1390016

URL: http://svn.apache.org/viewvc?rev=1390016&view=rev
Log:
Merge -c 1377714 from branch-1 to branch-1.1 to fix MAPREDUCE-4328. Add a 
JobTracker safemode to allow it to be resilient to NameNode failures.

Added:

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/HDFSMonitorThread.java
  - copied unchanged from r1377714, 
hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/HDFSMonitorThread.java

hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/mapred/TestJobTrackerQuiescence.java
  - copied unchanged from r1377714, 
hadoop/common/branches/branch-1/src/test/org/apache/hadoop/mapred/TestJobTrackerQuiescence.java
Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt

hadoop/common/branches/branch-1.1/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java

hadoop/common/branches/branch-1.1/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java

hadoop/common/branches/branch-1.1/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FairScheduler.java

hadoop/common/branches/branch-1.1/src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/TestFairScheduler.java

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/AdminOperationsProtocol.java

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/AuditLogger.java

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/JobQueueTaskScheduler.java

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/JobTracker.java

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/TaskInProgress.java

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/TaskTracker.java

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/TaskTrackerManager.java

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/tools/MRAdmin.java

hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/mapred/TestJobQueueTaskScheduler.java

hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/mapred/TestParallelInitialization.java
hadoop/common/branches/branch-1.1/src/webapps/job/jobtracker.jsp

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1390016&r1=1390015&r2=1390016&view=diff
==
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue Sep 25 17:46:34 2012
@@ -54,6 +54,13 @@ Release 1.1.0 - 2012.09.16
 configured timeout and are selected as the last location to read from.
 (Jing Zhao via szetszwo)
 
+MAPREDUCE-4328. Add a JobTracker safemode to allow it to be resilient to
+NameNode failures. The safemode can be entered either automatically via
+the configurable background thread to monitor the NameNode or by the
+admin. In the safemode the JobTracker doesn't schedule new tasks, marks
+all failed tasks as KILLED for future retries and doesn't accept new job
+submissions. (acmurthy)
+
   IMPROVEMENTS
 
 HADOOP-8656. Backport forced daemon shutdown of HADOOP-8353 into branch-1

Modified: 
hadoop/common/branches/branch-1.1/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java?rev=1390016&r1=1390015&r2=1390016&view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java
 Tue Sep 25 17:46:34 2012
@@ -1042,10 +1042,20 @@ class CapacityTaskScheduler extends Task
  */ 
 updateAllQueues(mapClusterCapacity, reduceClusterCapacity);
 
-// schedule tasks
+/*
+ * Schedule tasks
+ */
+
 ListTask result = new ArrayListTask();
-addMapTasks(taskTracker, result, maxMapSlots, currentMapSlots);
-addReduceTask(taskTracker, result, maxReduceSlots, currentReduceSlots);
+
+// Check for JT safe-mode
+if (taskTrackerManager.isInSafeMode()) {
+  LOG.info("JobTracker is in safe-mode, not scheduling any tasks.");
+} else {
+  addMapTasks(taskTracker, result, maxMapSlots, currentMapSlots);
+  addReduceTask(taskTracker, result, maxReduceSlots, currentReduceSlots);
+}
+
 return result;
   }
 

Modified: 

svn commit: r1390021 - /hadoop/common/branches/branch-1/CHANGES.txt

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:53:14 2012
New Revision: 1390021

URL: http://svn.apache.org/viewvc?rev=1390021&view=rev
Log:
HADOOP-8617. Merging to branch-1.1.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1390021&r1=1390020&r2=1390021&view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 17:53:14 2012
@@ -53,9 +53,6 @@ Release 1.2.0 - unreleased
 MAPREDUCE-4415. Backport the Job.getInstance methods from
 MAPREDUCE-1505 to branch-1. (harsh)
 
-HADOOP-8617. Backport HADOOP-6148, HADOOP-6166 and HADOOP-7333 for a pure
-Java CRC32 calculator implementation.  (Brandon Li via szetszwo)
-
 HDFS-496. Backport: Use PureJavaCrc32 in HDFS.  (Brandon Li via szetszwo)
 
 MAPREDUCE-782. Use PureJavaCrc32 in mapreduce spills. 
@@ -402,6 +399,9 @@ Release 1.1.0 - unreleased
 JobTracker for smaller clusters. (Todd Lipcon, backport by 
 Brandon Li via sseth)
 
+HADOOP-8617. Backport HADOOP-6148, HADOOP-6166 and HADOOP-7333 for a pure
+Java CRC32 calculator implementation.  (Brandon Li via szetszwo)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations




svn commit: r1390022 - in /hadoop/common/branches/branch-1.1: ./ src/core/org/apache/hadoop/fs/ src/core/org/apache/hadoop/util/ src/test/org/apache/hadoop/util/

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:54:38 2012
New Revision: 1390022

URL: http://svn.apache.org/viewvc?rev=1390022&view=rev
Log:
Merge -c 1365591 from branch-1 to branch-1.1 to fix HADOOP-8617. Backport 
HADOOP-6148, HADOOP-6166 and HADOOP-7333 for a pure Java CRC32 calculator 
implementation.

Added:

hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/util/PureJavaCrc32.java
  - copied unchanged from r1365591, 
hadoop/common/branches/branch-1/src/core/org/apache/hadoop/util/PureJavaCrc32.java

hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/util/TestPureJavaCrc32.java
  - copied unchanged from r1365591, 
hadoop/common/branches/branch-1/src/test/org/apache/hadoop/util/TestPureJavaCrc32.java
Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt

hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/fs/ChecksumFileSystem.java

hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/util/DataChecksum.java

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1390022&r1=1390021&r2=1390022&view=diff
==
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue Sep 25 17:54:38 2012
@@ -179,6 +179,9 @@ Release 1.1.0 - 2012.09.16
 JobTracker for smaller clusters. (Todd Lipcon, backport by 
 Brandon Li via sseth)
 
+HADOOP-8617. Backport HADOOP-6148, HADOOP-6166 and HADOOP-7333 for a pure
+Java CRC32 calculator implementation.  (Brandon Li via szetszwo)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations

Modified: 
hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/fs/ChecksumFileSystem.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/fs/ChecksumFileSystem.java?rev=1390022&r1=1390021&r2=1390022&view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/fs/ChecksumFileSystem.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/fs/ChecksumFileSystem.java
 Tue Sep 25 17:54:38 2012
@@ -20,13 +20,13 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 import java.util.Arrays;
-import java.util.zip.CRC32;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.PureJavaCrc32;
 import org.apache.hadoop.util.StringUtils;
 
 /
@@ -135,7 +135,7 @@ public abstract class ChecksumFileSystem
if (!Arrays.equals(version, CHECKSUM_VERSION))
   throw new IOException("Not a checksum file: "+sumFile);
 this.bytesPerSum = sums.readInt();
-set(fs.verifyChecksum, new CRC32(), bytesPerSum, 4);
+set(fs.verifyChecksum, new PureJavaCrc32(), bytesPerSum, 4);
   } catch (FileNotFoundException e) { // quietly ignore
 set(fs.verifyChecksum, null, 1, 0);
   } catch (IOException e) {   // loudly ignore
@@ -330,7 +330,7 @@ public abstract class ChecksumFileSystem
   long blockSize,
   Progressable progress)
   throws IOException {
-  super(new CRC32(), fs.getBytesPerSum(), 4);
+  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
   int bytesPerSum = fs.getBytesPerSum();
   this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
  replication, blockSize, progress);

Modified: 
hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/util/DataChecksum.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/util/DataChecksum.java?rev=1390022&r1=1390021&r2=1390022&view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/util/DataChecksum.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/util/DataChecksum.java
 Tue Sep 25 17:54:38 2012
@@ -19,8 +19,6 @@
 package org.apache.hadoop.util;
 
 import java.util.zip.Checksum;
-import java.util.zip.CRC32;
-
 import java.io.*;
 
 /**
@@ -51,7 +49,7 @@ public class DataChecksum implements Che
   return new DataChecksum( CHECKSUM_NULL, new ChecksumNull(), 
CHECKSUM_NULL_SIZE, bytesPerChecksum );
 case CHECKSUM_CRC32 :
-  return new DataChecksum( CHECKSUM_CRC32, new CRC32(), 
+  return new DataChecksum( CHECKSUM_CRC32, new PureJavaCrc32(), 
CHECKSUM_CRC32_SIZE, 

svn commit: r1390024 - /hadoop/common/branches/branch-1/CHANGES.txt

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:56:04 2012
New Revision: 1390024

URL: http://svn.apache.org/viewvc?rev=1390024&view=rev
Log:
HDFS-496  MAPREDUCE-782. Merging to branch-1.1.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1390024&r1=1390023&r2=1390024&view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 17:56:04 2012
@@ -53,11 +53,6 @@ Release 1.2.0 - unreleased
 MAPREDUCE-4415. Backport the Job.getInstance methods from
 MAPREDUCE-1505 to branch-1. (harsh)
 
-HDFS-496. Backport: Use PureJavaCrc32 in HDFS.  (Brandon Li via szetszwo)
-
-MAPREDUCE-782. Use PureJavaCrc32 in mapreduce spills. 
-(Todd Lipcon, backport by Brandon Li via sseth)
-
 HDFS-3667.  Add retry support to WebHdfsFileSystem.  (szetszwo)
 
 HDFS-3697. Enable fadvise readahead by default. (todd via eli)
@@ -402,6 +397,11 @@ Release 1.1.0 - unreleased
 HADOOP-8617. Backport HADOOP-6148, HADOOP-6166 and HADOOP-7333 for a pure
 Java CRC32 calculator implementation.  (Brandon Li via szetszwo)
 
+HDFS-496. Backport: Use PureJavaCrc32 in HDFS.  (Brandon Li via szetszwo)
+
+MAPREDUCE-782. Use PureJavaCrc32 in mapreduce spills. 
+(Todd Lipcon, backport by Brandon Li via sseth)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations




svn commit: r1390027 - in /hadoop/common/branches/branch-1.1: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/DFSClient.java src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:57:21 2012
New Revision: 1390027

URL: http://svn.apache.org/viewvc?rev=1390027&view=rev
Log:
Merge -c 1365881 from branch-1 to branch-1.1 to fix HDFS-496. Backport: Use 
PureJavaCrc32 in HDFS.

Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt

hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1390027&r1=1390026&r2=1390027&view=diff
==
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue Sep 25 17:57:21 2012
@@ -182,6 +182,8 @@ Release 1.1.0 - 2012.09.16
 HADOOP-8617. Backport HADOOP-6148, HADOOP-6166 and HADOOP-7333 for a pure
 Java CRC32 calculator implementation.  (Brandon Li via szetszwo)
 
+HDFS-496. Backport: Use PureJavaCrc32 in HDFS.  (Brandon Li via szetszwo)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations

Modified: 
hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=1390027&r1=1390026&r2=1390027&view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
 Tue Sep 25 17:57:21 2012
@@ -54,7 +54,6 @@ import org.apache.commons.logging.*;
 import java.io.*;
 import java.net.*;
 import java.util.*;
-import java.util.zip.CRC32;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.ConcurrentHashMap;
 import java.nio.BufferOverflowException;
@@ -3393,7 +3392,7 @@ public class DFSClient implements FSCons
 
 private DFSOutputStream(String src, long blockSize, Progressable progress,
 int bytesPerChecksum, short replication) throws IOException {
-  super(new CRC32(), bytesPerChecksum, 4);
+  super(new PureJavaCrc32(), bytesPerChecksum, 4);
   this.src = src;
   this.blockSize = blockSize;
   this.blockReplication = replication;

Modified: 
hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1390027&r1=1390026&r2=1390027&view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 Tue Sep 25 17:57:21 2012
@@ -27,7 +27,6 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
 import java.util.LinkedList;
-import java.util.zip.CRC32;
 import java.util.zip.Checksum;
 
 import org.apache.commons.logging.Log;
@@ -43,6 +42,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.PureJavaCrc32;
 import org.apache.hadoop.util.StringUtils;
 import static 
org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT;
 
@@ -718,7 +718,7 @@ class BlockReceiver implements java.io.C
 }
 
 // compute crc of partial chunk from data read in the block file.
-partialCrc = new CRC32();
+partialCrc = new PureJavaCrc32();
 partialCrc.update(buf, 0, sizePartialChunk);
 LOG.info("Read in partial CRC chunk from disk for block " + block);
 




svn commit: r1390029 - in /hadoop/common/branches/branch-1.1: CHANGES.txt src/mapred/org/apache/hadoop/mapred/SpillRecord.java

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 17:59:51 2012
New Revision: 1390029

URL: http://svn.apache.org/viewvc?rev=1390029&view=rev
Log:
Merge -c 1366280 from branch-1 to branch-1.1 to fix MAPREDUCE-782. Use 
PureJavaCrc32 in mapreduce spills.

Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt

hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/SpillRecord.java

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1390029&r1=1390028&r2=1390029&view=diff
==
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue Sep 25 17:59:51 2012
@@ -184,6 +184,9 @@ Release 1.1.0 - 2012.09.16
 
 HDFS-496. Backport: Use PureJavaCrc32 in HDFS.  (Brandon Li via szetszwo)
 
+MAPREDUCE-782. Use PureJavaCrc32 in mapreduce spills. 
+(Todd Lipcon, backport by Brandon Li via sseth)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations

Modified: 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/SpillRecord.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/SpillRecord.java?rev=1390029&r1=1390028&r2=1390029&view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/SpillRecord.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/mapred/org/apache/hadoop/mapred/SpillRecord.java
 Tue Sep 25 17:59:51 2012
@@ -22,7 +22,6 @@ import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.LongBuffer;
-import java.util.zip.CRC32;
 import java.util.zip.CheckedInputStream;
 import java.util.zip.CheckedOutputStream;
 import java.util.zip.Checksum;
@@ -33,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SecureIOUtils;
+import org.apache.hadoop.util.PureJavaCrc32;
 
 import static org.apache.hadoop.mapred.MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH;
 
@@ -51,7 +51,7 @@ class SpillRecord {
 
   public SpillRecord(Path indexFileName, JobConf job, String 
expectedIndexOwner)
   throws IOException {
-this(indexFileName, job, new CRC32(), expectedIndexOwner);
+this(indexFileName, job, new PureJavaCrc32(), expectedIndexOwner);
   }
 
   public SpillRecord(Path indexFileName, JobConf job, Checksum crc, 
@@ -115,7 +115,7 @@ class SpillRecord {
*/
   public void writeToFile(Path loc, JobConf job)
   throws IOException {
-writeToFile(loc, job, new CRC32());
+writeToFile(loc, job, new PureJavaCrc32());
   }
 
   public void writeToFile(Path loc, JobConf job, Checksum crc)




svn commit: r1390030 - /hadoop/common/branches/branch-1/CHANGES.txt

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 18:01:11 2012
New Revision: 1390030

URL: http://svn.apache.org/viewvc?rev=1390030&view=rev
Log:
HADOOP-8748. Merging to branch-1.1.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1390030&r1=1390029&r2=1390030&view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 18:01:11 2012
@@ -72,9 +72,6 @@ Release 1.2.0 - unreleased
 MAPREDUCE-4499. Looking for speculative tasks is very expensive in 1.x
 (Koji Noguchi via tgraves)
 
-HADOOP-8748. Refactor DFSClient retry utility methods to a new class in
-org.apache.hadoop.io.retry.  Contributed by Arun C Murthy.
-
 HDFS-3871. Change DFSClient to use RetryUtils.  (Arun C Murthy
 via szetszwo)
 
@@ -402,6 +399,9 @@ Release 1.1.0 - unreleased
 MAPREDUCE-782. Use PureJavaCrc32 in mapreduce spills. 
 (Todd Lipcon, backport by Brandon Li via sseth)
 
+HADOOP-8748. Refactor DFSClient retry utility methods to a new class in
+org.apache.hadoop.io.retry.  Contributed by Arun C Murthy.
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations




svn commit: r1390031 - in /hadoop/common/branches/branch-1.1: CHANGES.txt src/core/org/apache/hadoop/io/retry/RetryUtils.java

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 18:02:30 2012
New Revision: 1390031

URL: http://svn.apache.org/viewvc?rev=1390031&view=rev
Log:
Merge -c 1379203 from branch-1 to branch-1.1 to fix HADOOP-8748. Refactor 
DFSClient retry utility methods to a new class in org.apache.hadoop.io.retry.

Added:

hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/io/retry/RetryUtils.java
  - copied unchanged from r1379203, 
hadoop/common/branches/branch-1/src/core/org/apache/hadoop/io/retry/RetryUtils.java
Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1390031&r1=1390030&r2=1390031&view=diff
==
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue Sep 25 18:02:30 2012
@@ -187,6 +187,9 @@ Release 1.1.0 - 2012.09.16
 MAPREDUCE-782. Use PureJavaCrc32 in mapreduce spills. 
 (Todd Lipcon, backport by Brandon Li via sseth)
 
+HADOOP-8748. Refactor DFSClient retry utility methods to a new class in
+org.apache.hadoop.io.retry.  Contributed by Arun C Murthy.
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations




svn commit: r1390032 - /hadoop/common/branches/branch-1/CHANGES.txt

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 18:04:24 2012
New Revision: 1390032

URL: http://svn.apache.org/viewvc?rev=1390032view=rev
Log:
HDFS-3871. Merging to branch-1.1.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1390032r1=1390031r2=1390032view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 18:04:24 2012
@@ -72,9 +72,6 @@ Release 1.2.0 - unreleased
 MAPREDUCE-4499. Looking for speculative tasks is very expensive in 1.x
 (Koji Noguchi via tgraves)
 
-HDFS-3871. Change DFSClient to use RetryUtils.  (Arun C Murthy
-via szetszwo)
-
 HDFS-1108 Log newly allocated blocks (hdfs-1108-hadoop-1-v5.patch) 
(sanjay) 
 
 HADOOP-8832. Port generic service plugin mechanism from HADOOP-5257
@@ -402,6 +399,9 @@ Release 1.1.0 - unreleased
 HADOOP-8748. Refactor DFSClient retry utility methods to a new class in
 org.apache.hadoop.io.retry.  Contributed by Arun C Murthy.
 
+HDFS-3871. Change DFSClient to use RetryUtils.  (Arun C Murthy
+via szetszwo)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations




svn commit: r1390038 - in /hadoop/common/branches/branch-1.1: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/DFSClient.java src/hdfs/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 18:21:41 2012
New Revision: 1390038

URL: http://svn.apache.org/viewvc?rev=1390038view=rev
Log:
Merge -c 1379746 from branch-1 to branch-1.1 to fix HDFS-3871. Change DFSClient 
to use RetryUtils.

Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt

hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1390038r1=1390037r2=1390038view=diff
==
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue Sep 25 18:21:41 2012
@@ -192,6 +192,9 @@ Release 1.1.0 - 2012.09.16
 HADOOP-8748. Refactor DFSClient retry utility methods to a new class in
 org.apache.hadoop.io.retry.  Contributed by Arun C Murthy.
 
+HDFS-3871. Change DFSClient to use RetryUtils.  (Arun C Murthy
+via szetszwo)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations

Modified: 
hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=1390038r1=1390037r2=1390038view=diff
==
--- 
hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
 Tue Sep 25 18:21:41 2012
@@ -21,6 +21,7 @@ import org.apache.hadoop.io.*;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ipc.*;
@@ -124,98 +125,28 @@ public class DFSClient implements FSCons
 return (ClientProtocol)RPC.getProxy(ClientProtocol.class,
 ClientProtocol.versionID, nameNodeAddr, ugi, conf,
 NetUtils.getSocketFactory(conf, ClientProtocol.class), 0,
-getMultipleLinearRandomRetry(conf));
-  }
-
-  /**
-   * Return the default retry policy used in RPC.
-   * 
-   * If dfs.client.retry.policy.enabled == false, use TRY_ONCE_THEN_FAIL.
-   * 
-   * Otherwise, 
-   * (1) use multipleLinearRandomRetry for
-   * - SafeModeException, or
-   * - IOException other than RemoteException; and
-   * (2) use TRY_ONCE_THEN_FAIL for
-   * - non-SafeMode RemoteException, or
-   * - non-IOException.
-   * 
-   * Note that dfs.client.retry.max  0 is not allowed.
-   */
-  public static RetryPolicy getDefaultRetryPolicy(Configuration conf) {
-final RetryPolicy multipleLinearRandomRetry = 
getMultipleLinearRandomRetry(conf);
-if (LOG.isDebugEnabled()) {
-  LOG.debug(multipleLinearRandomRetry =  + multipleLinearRandomRetry);
-}
-
-if (multipleLinearRandomRetry == null) {
-  //no retry
-  return RetryPolicies.TRY_ONCE_THEN_FAIL;
-} else {
-  //use exponential backoff
-  return new RetryPolicy() {
-@Override
-public boolean shouldRetry(Exception e, int retries) throws Exception {
-  //see (1) and (2) in the javadoc of this method.
-  final RetryPolicy p;
-  if (e instanceof RemoteException) {
-final RemoteException re = (RemoteException)e;
-p = SafeModeException.class.getName().equals(re.getClassName())?
-multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL;
-  } else if (e instanceof IOException) {
-p = multipleLinearRandomRetry;
-  } else { //non-IOException
-p = RetryPolicies.TRY_ONCE_THEN_FAIL;
-  }
-
-  if (LOG.isDebugEnabled()) {
-LOG.debug(RETRY  + retries + ) policy=
-+ p.getClass().getSimpleName() + , exception= + e);
-  }
-  return p.shouldRetry(e, retries);
-}
-
-@Override
-public String toString() {
-  return RetryPolicy[ + multipleLinearRandomRetry + , 
-  + RetryPolicies.TRY_ONCE_THEN_FAIL.getClass().getSimpleName()
-  + ];
-}
-  };
-}
-  }
-
-  /**
-   * Return the MultipleLinearRandomRetry policy specified in the conf,
-   * or null if the feature is disabled.
-   * If the policy is specified in the conf but the policy cannot be parsed,
-   * the default policy is returned.
-   * 
-   * Conf property: N pairs of sleep-time and number-of-retries
-   *   dfs.client.retry.policy = s1,n1,s2,n2,...
-   */
-  private static RetryPolicy getMultipleLinearRandomRetry(Configuration conf) {
-final 

svn commit: r1390041 - in /hadoop/common/branches/branch-1: ./ src/mapred/org/apache/hadoop/mapred/ src/test/org/apache/hadoop/mapred/

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 18:25:45 2012
New Revision: 1390041

URL: http://svn.apache.org/viewvc?rev=1390041view=rev
Log:
MAPREDUCE-4603. Add support for JobClient to retry job-submission when 
JobTracker is in SafeMode. Contributed by Arun C. Murthy.

Added:

hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/SafeModeException.java

hadoop/common/branches/branch-1/src/test/org/apache/hadoop/mapred/TestJobClientRetries.java
Modified:
hadoop/common/branches/branch-1/CHANGES.txt

hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/JobClient.java

hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/JobTracker.java

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1390041r1=1390040r2=1390041view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 18:25:45 2012
@@ -394,14 +394,15 @@ Release 1.1.0 - unreleased
 MAPREDUCE-782. Use PureJavaCrc32 in mapreduce spills. 
 (Todd Lipcon, backport by Brandon Li via sseth)
 
-HDFS-3667.  Add retry support to WebHdfsFileSystem.  (szetszwo)
-
 HADOOP-8748. Refactor DFSClient retry utility methods to a new class in
 org.apache.hadoop.io.retry.  Contributed by Arun C Murthy.
 
 HDFS-3871. Change DFSClient to use RetryUtils.  (Arun C Murthy
 via szetszwo)
 
+MAPREDUCE-4603. Add support for JobClient to retry job-submission when
+JobTracker is in SafeMode. (acmurthy)
+
   BUG FIXES
 
 HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations

Modified: 
hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/JobClient.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/JobClient.java?rev=1390041r1=1390040r2=1390041view=diff
==
--- 
hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/JobClient.java
 (original)
+++ 
hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/JobClient.java
 Tue Sep 25 18:25:45 2012
@@ -40,6 +40,7 @@ import java.security.PrivilegedException
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Comparator;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -59,6 +60,10 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.mapred.Counters.Counter;
@@ -434,7 +439,9 @@ public class JobClient extends Configure
 }
   }
 
+  private JobSubmissionProtocol rpcJobSubmitClient;
   private JobSubmissionProtocol jobSubmitClient;
+  
   private Path sysDir = null;
   private Path stagingAreaDir = null;
   
@@ -445,6 +452,15 @@ public class JobClient extends Configure
   private static final int DEFAULT_TASKLOG_TIMEOUT = 6;
   static int tasklogtimeout;
 
+  public static final String MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_KEY =
+  mapreduce.jobclient.retry.policy.enabled;
+  public static final boolean MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = 
+  false;
+  public static final String MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_KEY =
+  mapreduce.jobclient.retry.policy.spec;
+  public static final String MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
+  1,6,6,10; //t1,n1,t2,n2,...
+  
   /**
* Create a job client.
*/
@@ -477,16 +493,61 @@ public class JobClient extends Configure
   conf.setNumMapTasks(1);
   this.jobSubmitClient = new LocalJobRunner(conf);
 } else {
-  this.jobSubmitClient = createRPCProxy(JobTracker.getAddress(conf), conf);
+  this.rpcJobSubmitClient = 
+  createRPCProxy(JobTracker.getAddress(conf), conf);
+  this.jobSubmitClient = createProxy(this.rpcJobSubmitClient, conf);
 }
   }
 
   private static JobSubmissionProtocol createRPCProxy(InetSocketAddress addr,
   Configuration conf) throws IOException {
-return (JobSubmissionProtocol) RPC.getProxy(JobSubmissionProtocol.class,
-JobSubmissionProtocol.versionID, addr, 
-UserGroupInformation.getCurrentUser(), conf,
-NetUtils.getSocketFactory(conf, JobSubmissionProtocol.class));
+
+JobSubmissionProtocol rpcJobSubmitClient = 
+(JobSubmissionProtocol)RPC.getProxy(
+JobSubmissionProtocol.class,
+JobSubmissionProtocol.versionID, addr, 
+

svn commit: r1390111 - in /hadoop/common/branches/branch-1: CHANGES.txt src/docs/src/documentation/content/xdocs/file_system_shell.xml

2012-09-25 Thread suresh
Author: suresh
Date: Tue Sep 25 20:13:11 2012
New Revision: 1390111

URL: http://svn.apache.org/viewvc?rev=1390111view=rev
Log:
HADOOP-8791. Fix rm command documentation to indicate it deletes files and not 
directories. Contributed by Jing Zhao.

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

hadoop/common/branches/branch-1/src/docs/src/documentation/content/xdocs/file_system_shell.xml

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1390111r1=1390110r2=1390111view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Sep 25 20:13:11 2012
@@ -220,6 +220,9 @@ Release 1.2.0 - unreleased
 MAPREDUCE-4662.  JobHistoryFilesManager thread pool never expands 
 (Kihwal Lee via tgraves)
 
+HADOOP-8791. Fix rm command documentation to indicte it deletes
+files and not directories. (Jing Zhao via suresh)
+
 Release 1.1.0 - unreleased
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/branches/branch-1/src/docs/src/documentation/content/xdocs/file_system_shell.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/docs/src/documentation/content/xdocs/file_system_shell.xml?rev=1390111r1=1390110r2=1390111view=diff
==
--- 
hadoop/common/branches/branch-1/src/docs/src/documentation/content/xdocs/file_system_shell.xml
 (original)
+++ 
hadoop/common/branches/branch-1/src/docs/src/documentation/content/xdocs/file_system_shell.xml
 Tue Sep 25 20:13:11 2012
@@ -412,7 +412,7 @@
codeUsage: hdfs dfs -rm [-skipTrash] URI [URI 
#x2026;] /code
/p
p
-  Delete files specified as args. Only deletes non empty directory and 
files. If the code-skipTrash/code option
+  Delete files specified as args. Only deletes files. If the 
code-skipTrash/code option
   is specified, the trash, if enabled, will be bypassed and the 
specified file(s) deleted immediately. This can be
   useful when it is necessary to delete files from an 
over-quota directory.
   Refer to rmr for recursive deletes.br/
@@ -420,7 +420,7 @@
   /p
ul
li
-   code hdfs dfs -rm 
hdfs://nn.example.com/file /user/hadoop/emptydir /code
+   code hdfs dfs -rm 
hdfs://nn.example.com/file /code
/li
/ul
pExit Code:/p
@@ -436,7 +436,7 @@
p
codeUsage: hdfs dfs -rmr [-skipTrash] URI 
[URI #x2026;]/code
/p
-   pRecursive version of delete. If the 
code-skipTrash/code option
+   pRecursive version of delete. The rmr command 
recursively deletes the directory and any content under it. If the 
code-skipTrash/code option
   is specified, the trash, if enabled, will be bypassed and 
the specified file(s) deleted immediately. This can be
   useful when it is necessary to delete files from an 
over-quota directory.br/
   Example:




svn commit: r1390129 - /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

2012-09-25 Thread bobby
Author: bobby
Date: Tue Sep 25 20:56:40 2012
New Revision: 1390129

URL: http://svn.apache.org/viewvc?rev=1390129view=rev
Log:
HADOOP-8840. Fix the test-patch colorizer to cover all sorts of +1 lines. 
(Harsh J via bobby)

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1390129r1=1390128r2=1390129view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Tue Sep 
25 20:56:40 2012
@@ -114,6 +114,9 @@ Trunk (Unreleased)
 HADOOP-8838. Colorize the test-patch output sent to JIRA (Harsh J via
 bobby)
 
+HADOOP-8840. Fix the test-patch colorizer to cover all sorts of +1 lines.
+(Harsh J via bobby)
+
   BUG FIXES
 
 HADOOP-8177. MBeans shouldn't try to register when it fails to create 
MBeanName.




svn commit: r1390129 - /hadoop/common/trunk/dev-support/test-patch.sh

2012-09-25 Thread bobby
Author: bobby
Date: Tue Sep 25 20:56:40 2012
New Revision: 1390129

URL: http://svn.apache.org/viewvc?rev=1390129view=rev
Log:
HADOOP-8840. Fix the test-patch colorizer to cover all sorts of +1 lines. 
(Harsh J via bobby)

Modified:
hadoop/common/trunk/dev-support/test-patch.sh

Modified: hadoop/common/trunk/dev-support/test-patch.sh
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/dev-support/test-patch.sh?rev=1390129r1=1390128r2=1390129view=diff
==
--- hadoop/common/trunk/dev-support/test-patch.sh (original)
+++ hadoop/common/trunk/dev-support/test-patch.sh Tue Sep 25 20:56:40 2012
@@ -341,14 +341,14 @@ checkTests () {
 fi
 JIRA_COMMENT=$JIRA_COMMENT
 
--1 tests included.  The patch doesn't appear to include any new or 
modified tests.
+{color:red}-1 tests included{color}.  The patch doesn't appear to include 
any new or modified tests.
 Please justify why no new tests are needed for this 
patch.
 Also please list what manual steps were performed to 
verify this patch.
 return 1
   fi
   JIRA_COMMENT=$JIRA_COMMENT
 
-+1 tests included.  The patch appears to include $testReferences new or 
modified test files.
+{color:green}+1 tests included{color}.  The patch appears to include 
$testReferences new or modified test files.
   return 0
 }
 
@@ -498,7 +498,7 @@ checkReleaseAuditWarnings () {
   if [[ $patchReleaseAuditWarnings -gt 0 ]] ; then
 JIRA_COMMENT=$JIRA_COMMENT
 
--1 release audit.  The applied patch generated $patchReleaseAuditWarnings 
release audit warnings.
+{color:red}-1 release audit{color}.  The applied patch generated 
$patchReleaseAuditWarnings release audit warnings.
 $GREP '\!?' $PATCH_DIR/patchReleaseAuditWarnings.txt  
$PATCH_DIR/patchReleaseAuditProblems.txt
 echo Lines that start with ? in the release audit report indicate 
files that do not have an Apache license header.  
$PATCH_DIR/patchReleaseAuditProblems.txt
 JIRA_COMMENT_FOOTER=Release audit warnings: 
$BUILD_URL/artifact/trunk/patchprocess/patchReleaseAuditProblems.txt
@@ -509,7 +509,7 @@ $JIRA_COMMENT_FOOTER
   fi
   JIRA_COMMENT=$JIRA_COMMENT
 
-+1 release audit.  The applied patch does not increase the total number of 
release audit warnings.
+{color:green}+1 release audit{color}.  The applied patch does not increase 
the total number of release audit warnings.
   return 0
 }
 
@@ -655,12 +655,12 @@ checkEclipseGeneration () {
   if [[ $? != 0 ]] ; then
   JIRA_COMMENT=$JIRA_COMMENT
 
--1 eclipse:eclipse.  The patch failed to build with eclipse:eclipse.
+{color:red}-1 eclipse:eclipse{color}.  The patch failed to build with 
eclipse:eclipse.
 return 1
   fi
   JIRA_COMMENT=$JIRA_COMMENT
 
-+1 eclipse:eclipse.  The patch built with eclipse:eclipse.
+{color:green}+1 eclipse:eclipse{color}.  The patch built with 
eclipse:eclipse.
   return 0
 }
 
@@ -700,13 +700,13 @@ ${module_failed_tests}
   if [[ -n $failed_tests ]] ; then
 JIRA_COMMENT=$JIRA_COMMENT
 
--1 core tests.  The patch failed these unit tests in $modules:
+{color:red}-1 core tests{color}.  The patch failed these unit tests in 
$modules:
 $failed_tests
 return 1
   fi
   JIRA_COMMENT=$JIRA_COMMENT
 
-+1 core tests.  The patch passed unit tests in $modules.
+{color:green}+1 core tests{color}.  The patch passed unit tests in 
$modules.
   return 0
 }
 
@@ -782,12 +782,12 @@ runContribTests () {
   if [[ $? != 0 ]] ; then
 JIRA_COMMENT=$JIRA_COMMENT
 
--1 contrib tests.  The patch failed contrib unit tests.
+{color:red}-1 contrib tests{color}.  The patch failed contrib unit tests.
 return 1
   fi
   JIRA_COMMENT=$JIRA_COMMENT
 
-+1 contrib tests.  The patch passed contrib unit tests.
+{color:green}+1 contrib tests{color}.  The patch passed contrib unit 
tests.
   return 0
 }
 
@@ -814,12 +814,12 @@ checkInjectSystemFaults () {
   if [[ $? != 0 ]] ; then
 JIRA_COMMENT=$JIRA_COMMENT
 
--1 system test framework.  The patch failed system test framework compile.
+{color:red}-1 system test framework{color}.  The patch failed system test 
framework compile.
 return 1
   fi
   JIRA_COMMENT=$JIRA_COMMENT
 
-+1 system test framework.  The patch passed system test framework compile.
+{color:green}+1 system test framework{color}.  The patch passed system 
test framework compile.
   return 0
 }
 




svn commit: r1390133 - /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

2012-09-25 Thread bobby
Author: bobby
Date: Tue Sep 25 21:03:58 2012
New Revision: 1390133

URL: http://svn.apache.org/viewvc?rev=1390133view=rev
Log:
HADOOP-8822. relnotes.py was deleted post mavenization (bobby)

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1390133r1=1390132r2=1390133view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Tue Sep 
25 21:03:58 2012
@@ -984,6 +984,8 @@ Release 0.23.4 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-8822. relnotes.py was deleted post mavenization (bobby)
+
   OPTIMIZATIONS
 
   BUG FIXES




svn commit: r1390133 - /hadoop/common/trunk/dev-support/relnotes.py

2012-09-25 Thread bobby
Author: bobby
Date: Tue Sep 25 21:03:58 2012
New Revision: 1390133

URL: http://svn.apache.org/viewvc?rev=1390133view=rev
Log:
HADOOP-8822. relnotes.py was deleted post mavenization (bobby)

Added:
hadoop/common/trunk/dev-support/relnotes.py

Added: hadoop/common/trunk/dev-support/relnotes.py
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/dev-support/relnotes.py?rev=1390133view=auto
==
--- hadoop/common/trunk/dev-support/relnotes.py (added)
+++ hadoop/common/trunk/dev-support/relnotes.py Tue Sep 25 21:03:58 2012
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+#   Licensed under the Apache License, Version 2.0 (the License);
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an AS IS BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+import re
+import sys
+from optparse import OptionParser
+import httplib
+import urllib
+import cgi
+try:
+  import json
+except ImportError:
+  import simplejson as json
+
+
+namePattern = re.compile(r' \([0-9]+\)')
+
+def clean(str):
+  return quoteHtml(re.sub(namePattern, , str))
+
+def formatComponents(str):
+  str = re.sub(namePattern, '', str).replace(', )
+  if str != :
+ret = ( + str + )
+  else:
+ret = 
+  return quoteHtml(ret)
+
+def quoteHtml(str):
+  return cgi.escape(str).encode('ascii', 'xmlcharrefreplace')
+
+def mstr(obj):
+  if (obj == None):
+return 
+  return unicode(obj)
+
+class Version:
+  Represents a version number
+  def __init__(self, data):
+self.mod = False
+self.data = data
+found = re.match('^((\d+)(\.\d+)*).*$', data)
+if (found):
+  self.parts = [ int(p) for p in found.group(1).split('.') ]
+else:
+  self.parts = []
+# backfill version with zeroes if missing parts
+self.parts.extend((0,) * (3 - len(self.parts)))
+
+  def decBugFix(self):
+self.mod = True
+self.parts[2] -= 1
+return self
+
+  def __str__(self):
+if (self.mod):
+  return '.'.join([ str(p) for p in self.parts ])
+return self.data
+
+  def __cmp__(self, other):
+return cmp(self.parts, other.parts)
+
+class Jira:
+  A single JIRA
+
+  def __init__(self, data, parent):
+self.key = data['key']
+self.fields = data['fields']
+self.parent = parent
+self.notes = None
+
+  def getId(self):
+return mstr(self.key)
+
+  def getDescription(self):
+return mstr(self.fields['description'])
+
+  def getReleaseNote(self):
+if (self.notes == None):
+  field = self.parent.fieldIdMap['Release Note']
+  if (self.fields.has_key(field)):
+self.notes=mstr(self.fields[field])
+  else:
+self.notes=self.getDescription()
+return self.notes
+
+  def getPriority(self):
+ret = 
+pri = self.fields['priority']
+if(pri != None):
+  ret = pri['name']
+return mstr(ret)
+
+  def getAssignee(self):
+ret = 
+mid = self.fields['assignee']
+if(mid != None):
+  ret = mid['displayName']
+return mstr(ret)
+
+  def getComponents(self):
+return  , .join([ comp['name'] for comp in self.fields['components'] ])
+
+  def getSummary(self):
+return self.fields['summary']
+
+  def getType(self):
+ret = 
+mid = self.fields['issuetype']
+if(mid != None):
+  ret = mid['name']
+return mstr(ret)
+
+  def getReporter(self):
+ret = 
+mid = self.fields['reporter']
+if(mid != None):
+  ret = mid['displayName']
+return mstr(ret)
+
+  def getProject(self):
+ret = 
+mid = self.fields['project']
+if(mid != None):
+  ret = mid['key']
+return mstr(ret)
+
+
+
+class JiraIter:
+  An Iterator of JIRAs
+
+  def __init__(self, versions):
+self.versions = versions
+
+resp = urllib.urlopen(https://issues.apache.org/jira/rest/api/2/field;)
+data = json.loads(resp.read())
+
+self.fieldIdMap = {}
+for part in data:
+  self.fieldIdMap[part['name']] = part['id']
+
+self.jiras = []
+at=0
+end=1
+count=100
+while (at  end):
+  params = urllib.urlencode({'jql': project in 
(HADOOP,HDFS,MAPREDUCE,YARN) and fixVersion in ('+' , '.join(versions)+') 
and resolution = Fixed, 'startAt':at+1, 'maxResults':count})
+  resp = 
urllib.urlopen(https://issues.apache.org/jira/rest/api/2/search?%s%params)
+  data = json.loads(resp.read())
+  if (data.has_key('errorMessages')):
+raise Exception(data['errorMessages'])
+  at = data['startAt'] + data['maxResults']
+  end = data['total']
+  self.jiras.extend(data['issues'])
+
+self.iter = self.jiras.__iter__()
+
+  def __iter__(self):
+

svn commit: r1390138 - /hadoop/common/branches/branch-2/dev-support/relnotes.py

2012-09-25 Thread bobby
Author: bobby
Date: Tue Sep 25 21:08:40 2012
New Revision: 1390138

URL: http://svn.apache.org/viewvc?rev=1390138view=rev
Log:
svn merge -c 1390133 FIXES: HADOOP-8822. relnotes.py was deleted post 
mavenization (bobby)

Added:
hadoop/common/branches/branch-2/dev-support/relnotes.py
  - copied unchanged from r1390133, 
hadoop/common/trunk/dev-support/relnotes.py



svn commit: r1390142 - /hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt

2012-09-25 Thread bobby
Author: bobby
Date: Tue Sep 25 21:11:12 2012
New Revision: 1390142

URL: http://svn.apache.org/viewvc?rev=1390142view=rev
Log:
svn merge -c 1390133 FIXES: HADOOP-8822. relnotes.py was deleted post 
mavenization (bobby)

Modified:

hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt

Modified: 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1390142r1=1390141r2=1390142view=diff
==
--- 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt
 (original)
+++ 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt
 Tue Sep 25 21:11:12 2012
@@ -8,6 +8,8 @@ Release 0.23.4 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-8822. relnotes.py was deleted post mavenization (bobby)
+
   OPTIMIZATIONS
 
   BUG FIXES




svn commit: r1390142 - /hadoop/common/branches/branch-0.23/dev-support/relnotes.py

2012-09-25 Thread bobby
Author: bobby
Date: Tue Sep 25 21:11:12 2012
New Revision: 1390142

URL: http://svn.apache.org/viewvc?rev=1390142view=rev
Log:
svn merge -c 1390133 FIXES: HADOOP-8822. relnotes.py was deleted post 
mavenization (bobby)

Added:
hadoop/common/branches/branch-0.23/dev-support/relnotes.py
  - copied unchanged from r1390133, 
hadoop/common/trunk/dev-support/relnotes.py



svn commit: r1390156 - /hadoop/common/branches/branch-2.0.2-alpha/dev-support/relnotes.py

2012-09-25 Thread bobby
Author: bobby
Date: Tue Sep 25 21:29:29 2012
New Revision: 1390156

URL: http://svn.apache.org/viewvc?rev=1390156view=rev
Log:
svn merge -c 1390133. FIXES HADOOP-8822 relnotes.py (bobby)

Added:
hadoop/common/branches/branch-2.0.2-alpha/dev-support/relnotes.py
  - copied unchanged from r1390133, 
hadoop/common/trunk/dev-support/relnotes.py



svn commit: r1390156 - /hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/CHANGES.txt

2012-09-25 Thread bobby
Author: bobby
Date: Tue Sep 25 21:29:29 2012
New Revision: 1390156

URL: http://svn.apache.org/viewvc?rev=1390156view=rev
Log:
svn merge -c 1390133. FIXES HADOOP-8822 relnotes.py (bobby)

Modified:

hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/CHANGES.txt

Modified: 
hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1390156r1=1390155r2=1390156view=diff
==
--- 
hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/CHANGES.txt
 (original)
+++ 
hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/CHANGES.txt
 Tue Sep 25 21:29:29 2012
@@ -705,6 +705,8 @@ Release 0.23.4 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-8822. relnotes.py was deleted post mavenization (bobby)
+
   OPTIMIZATIONS
 
   BUG FIXES




[Hadoop Wiki] Update of HowToReleasePostMavenization by RobertEvans

2012-09-25 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The HowToReleasePostMavenization page has been changed by RobertEvans:
http://wiki.apache.org/hadoop/HowToReleasePostMavenization?action=diffrev1=36rev2=37

  svn co https://svn.apache.org/repos/asf/hadoop/common/branches/branch-X.Y
  }}}
1. Update {{{CHANGES.txt}}} to include the release version and date 
(this change must be committed to trunk and any intermediate branches between 
trunk and the branch being released).
-   1. Update {{{src/docs/releasenotes.html}}} with release notes for this 
release. You generate these with: {{{
+   1. Generate {{{releasenotes.html}}} with release notes for this 
release. You generate these with: {{{
+ python ./dev-support/relnotes.py -v $(vers)
}}} If your release includes more than one version you may add additional -v 
options for each version.  By default the previousVersion mentioned in the 
notes will be X.Y.Z-1, if this is not correct you can override this by setting 
the --previousVer option.
+ 1. Update {{{releasenotes.html}}} {{{
+ mv releasenotes.$(vers).html 
./hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
+ }}} Note that the script generates a set of notes for HDFS, HADOOP, 
MAPREDUCE, and YARN too, but only common is linked from the html documentation 
so the individual ones are ignored for now.
- cd src/docs
- jira.sh -s https://issues.apache.org/jira -u $user -p $pw \
--a getIssueList --search \
-project in (HADOOP,HDFS,MAPREDUCE) and fixVersion = '$vers' and 
resolution = Fixed \
-| ./relnotes.py  $vers.html
- }}} edit the releasenotes.html with the list of items from $vers.html. 
1. Update the version number in {{{build.xml}}} to be 
''hadoop-X.Y.N-dev'', where ''N'' is one greater than the release being made.
1. Commit these changes. {{{
  svn commit -m Preparing for release X.Y.Z


svn commit: r1390199 - in /hadoop/common/branches/HDFS-3077: ./ dev-support/relnotes.py dev-support/test-patch.sh

2012-09-25 Thread todd
Author: todd
Date: Tue Sep 25 22:43:04 2012
New Revision: 1390199

URL: http://svn.apache.org/viewvc?rev=1390199view=rev
Log:
Merge trunk into branch

Added:
hadoop/common/branches/HDFS-3077/dev-support/relnotes.py
  - copied unchanged from r1390198, 
hadoop/common/trunk/dev-support/relnotes.py
Modified:
hadoop/common/branches/HDFS-3077/   (props changed)
hadoop/common/branches/HDFS-3077/dev-support/test-patch.sh

Propchange: hadoop/common/branches/HDFS-3077/
--
  Merged /hadoop/common/trunk:r1387449-1390198

Modified: hadoop/common/branches/HDFS-3077/dev-support/test-patch.sh
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/dev-support/test-patch.sh?rev=1390199r1=1390198r2=1390199view=diff
==
--- hadoop/common/branches/HDFS-3077/dev-support/test-patch.sh (original)
+++ hadoop/common/branches/HDFS-3077/dev-support/test-patch.sh Tue Sep 25 
22:43:04 2012
@@ -250,7 +250,7 @@ verifyPatch () {
 echo PATCH APPLICATION FAILED
 JIRA_COMMENT=$JIRA_COMMENT
 
--1 patch.  The patch command could not apply the patch.
+{color:red}-1 patch{color}.  The patch command could not apply the patch.
 return 1
   else
 return 0
@@ -305,12 +305,12 @@ checkAuthor () {
   if [[ $authorTags != 0 ]] ; then
 JIRA_COMMENT=$JIRA_COMMENT
 
--1 @author.  The patch appears to contain $authorTags @author tags which 
the Hadoop community has agreed to not allow in code contributions.
+{color:red}-1 @author{color}.  The patch appears to contain $authorTags 
@author tags which the Hadoop community has agreed to not allow in code 
contributions.
 return 1
   fi
   JIRA_COMMENT=$JIRA_COMMENT
 
-+1 @author.  The patch does not contain any @author tags.
+{color:green}+1 @author{color}.  The patch does not contain any @author 
tags.
   return 0
 }
 
@@ -341,14 +341,14 @@ checkTests () {
 fi
 JIRA_COMMENT=$JIRA_COMMENT
 
--1 tests included.  The patch doesn't appear to include any new or 
modified tests.
+{color:red}-1 tests included{color}.  The patch doesn't appear to include 
any new or modified tests.
 Please justify why no new tests are needed for this 
patch.
 Also please list what manual steps were performed to 
verify this patch.
 return 1
   fi
   JIRA_COMMENT=$JIRA_COMMENT
 
-+1 tests included.  The patch appears to include $testReferences new or 
modified test files.
+{color:green}+1 tests included{color}.  The patch appears to include 
$testReferences new or modified test files.
   return 0
 }
 
@@ -379,7 +379,7 @@ applyPatch () {
 echo PATCH APPLICATION FAILED
 JIRA_COMMENT=$JIRA_COMMENT
 
--1 patch.  The patch command could not apply the patch.
+{color:red}-1 patch{color}.  The patch command could not apply the patch.
 return 1
   fi
   return 0
@@ -416,12 +416,12 @@ checkJavadocWarnings () {
   if [[ $javadocWarnings -ne $OK_JAVADOC_WARNINGS ]] ; then
 JIRA_COMMENT=$JIRA_COMMENT
 
--1 javadoc.  The javadoc tool appears to have generated `expr 
$(($javadocWarnings-$OK_JAVADOC_WARNINGS))` warning messages.
+{color:red}-1 javadoc{color}.  The javadoc tool appears to have generated 
`expr $(($javadocWarnings-$OK_JAVADOC_WARNINGS))` warning messages.
 return 1
   fi
   JIRA_COMMENT=$JIRA_COMMENT
 
-+1 javadoc.  The javadoc tool did not generate any warning messages.
+{color:green}+1 javadoc{color}.  The javadoc tool did not generate any 
warning messages.
   return 0
 }
 
@@ -442,7 +442,7 @@ checkJavacWarnings () {
   if [[ $? != 0 ]] ; then
 JIRA_COMMENT=$JIRA_COMMENT
 
--1 javac.  The patch appears to cause the build to fail.
+{color:red}-1 javac{color:red}.  The patch appears to cause the build to 
fail.
 return 2
   fi
   ### Compare trunk and patch javac warning numbers
@@ -456,7 +456,7 @@ checkJavacWarnings () {
   if [[ $patchJavacWarnings -gt $trunkJavacWarnings ]] ; then
 JIRA_COMMENT=$JIRA_COMMENT
 
--1 javac.  The applied patch generated $patchJavacWarnings javac compiler 
warnings (more than the trunk's current $trunkJavacWarnings warnings).
+  {color:red}-1 javac{color}.  The applied patch generated 
$patchJavacWarnings javac compiler warnings (more than the trunk's current 
$trunkJavacWarnings warnings).
 
 $DIFF $PATCH_DIR/filteredTrunkJavacWarnings.txt 
$PATCH_DIR/filteredPatchJavacWarnings.txt  $PATCH_DIR/diffJavacWarnings.txt 
 JIRA_COMMENT_FOOTER=Javac warnings: 
$BUILD_URL/artifact/trunk/patchprocess/diffJavacWarnings.txt
@@ -468,7 +468,7 @@ $JIRA_COMMENT_FOOTER
   fi
   JIRA_COMMENT=$JIRA_COMMENT
 
-+1 javac.  The applied patch does not increase the total number of javac 
compiler warnings.
+{color:green}+1 javac{color}.  The applied patch does not increase the 
total number of javac compiler warnings.
   return 0

svn commit: r1390210 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java

2012-09-25 Thread todd
Author: todd
Date: Tue Sep 25 23:16:14 2012
New Revision: 1390210

URL: http://svn.apache.org/viewvc?rev=1390210view=rev
Log:
HADOOP-3957. Change MutableQuantiles to use a shared thread for rolling over 
metrics. Contributed by Andrew Wang.

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1390210r1=1390209r2=1390210view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Tue Sep 
25 23:16:14 2012
@@ -244,6 +244,9 @@ Trunk (Unreleased)
 required context item is not configured
 (Brahma Reddy Battula via harsh)
 
+HADOOP-3957. Change MutableQuantiles to use a shared thread for rolling
+over metrics. (Andrew Wang via todd)
+
   OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java?rev=1390210r1=1390209r2=1390210view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
 Tue Sep 25 23:16:14 2012
@@ -35,6 +35,7 @@ import org.apache.hadoop.metrics2.util.Q
 import org.apache.hadoop.metrics2.util.SampleQuantiles;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * Watches a stream of long values, maintaining online estimates of specific
@@ -60,8 +61,9 @@ public class MutableQuantiles extends Mu
   @VisibleForTesting
   protected MapQuantile, Long previousSnapshot = null;
 
-  private final ScheduledExecutorService scheduler = Executors
-  .newScheduledThreadPool(1);
+  private static final ScheduledExecutorService scheduler = Executors
+  .newScheduledThreadPool(1, new ThreadFactoryBuilder().setDaemon(true)
+  .setNameFormat(MutableQuantiles-%d).build());
 
   /**
* Instantiates a new {@link MutableQuantiles} for a metric that rolls itself




svn commit: r1390211 - in /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java

2012-09-25 Thread todd
Author: todd
Date: Tue Sep 25 23:16:20 2012
New Revision: 1390211

URL: http://svn.apache.org/viewvc?rev=1390211view=rev
Log:
HADOOP-3957. Change MutableQuantiles to use a shared thread for rolling over 
metrics. Contributed by Andrew Wang.

Modified:

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1390211r1=1390210r2=1390211view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
Tue Sep 25 23:16:20 2012
@@ -22,6 +22,9 @@ Release 2.0.3-alpha - Unreleased 
 
 HADOOP-8812. ExitUtil#terminate should print Exception#toString. (eli)
 
+HADOOP-3957. Change MutableQuantiles to use a shared thread for rolling
+over metrics. (Andrew Wang via todd)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java?rev=1390211r1=1390210r2=1390211view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
 (original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
 Tue Sep 25 23:16:20 2012
@@ -35,6 +35,7 @@ import org.apache.hadoop.metrics2.util.Q
 import org.apache.hadoop.metrics2.util.SampleQuantiles;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * Watches a stream of long values, maintaining online estimates of specific
@@ -60,8 +61,9 @@ public class MutableQuantiles extends Mu
   @VisibleForTesting
   protected MapQuantile, Long previousSnapshot = null;
 
-  private final ScheduledExecutorService scheduler = Executors
-  .newScheduledThreadPool(1);
+  private static final ScheduledExecutorService scheduler = Executors
+  .newScheduledThreadPool(1, new ThreadFactoryBuilder().setDaemon(true)
+  .setNameFormat(MutableQuantiles-%d).build());
 
   /**
* Instantiates a new {@link MutableQuantiles} for a metric that rolls itself




svn commit: r1390221 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: CHANGES.txt src/main/bin/hadoop-config.sh src/main/bin/start-all.sh

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 23:43:48 2012
New Revision: 1390221

URL: http://svn.apache.org/viewvc?rev=1390221view=rev
Log:
HADOOP-8794. Rename YARN_HOME to HADOOP_YARN_HOME. Contributed by Vinod K V.

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1390221r1=1390220r2=1390221view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Tue Sep 
25 23:43:48 2012
@@ -301,6 +301,8 @@ Release 2.0.2-alpha - 2012-09-07 
 HADOOP-8689. Make trash a server side configuration option. (eli)
 
 HADOOP-8710. Remove ability for users to easily run the trash emptier. 
(eli)
+
+HADOOP-8794. Rename YARN_HOME to HADOOP_YARN_HOME. (vinodkv via acmurthy)
 
   NEW FEATURES
  

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh?rev=1390221r1=1390220r2=1390221view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 Tue Sep 25 23:43:48 2012
@@ -269,21 +269,21 @@ fi
 CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/$HDFS_DIR'/*'
 
 # put yarn in classpath if present
-if [ $YARN_HOME =  ]; then
+if [ $HADOOP_YARN_HOME =  ]; then
   if [ -d ${HADOOP_PREFIX}/$YARN_DIR ]; then
-export YARN_HOME=$HADOOP_PREFIX
+export HADOOP_YARN_HOME=$HADOOP_PREFIX
   fi
 fi
 
-if [ -d $YARN_HOME/$YARN_DIR/webapps ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_DIR
+if [ -d $HADOOP_YARN_HOME/$YARN_DIR/webapps ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR
 fi
 
-if [ -d $YARN_HOME/$YARN_LIB_JARS_DIR ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_LIB_JARS_DIR'/*'
+if [ -d $HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR'/*'
 fi
 
-CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_DIR'/*'
+CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR'/*'
 
 # put mapred in classpath if present AND different from YARN
 if [ $HADOOP_MAPRED_HOME =  ]; then
@@ -292,7 +292,7 @@ if [ $HADOOP_MAPRED_HOME =  ]; then
   fi
 fi
 
-if [ $HADOOP_MAPRED_HOME/$MAPRED_DIR != $YARN_HOME/$YARN_DIR ] ; then
+if [ $HADOOP_MAPRED_HOME/$MAPRED_DIR != $HADOOP_YARN_HOME/$YARN_DIR ] ; 
then
   if [ -d $HADOOP_MAPRED_HOME/$MAPRED_DIR/webapps ]; then
 CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/$MAPRED_DIR
   fi

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh?rev=1390221r1=1390220r2=1390221view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
 Tue Sep 25 23:43:48 2012
@@ -33,6 +33,6 @@ if [ -f ${HADOOP_HDFS_HOME}/sbin/start
 fi
 
 # start yarn daemons if yarn is present
-if [ -f ${YARN_HOME}/sbin/start-yarn.sh ]; then
-  ${YARN_HOME}/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
+if [ -f ${HADOOP_YARN_HOME}/sbin/start-yarn.sh ]; then
+  ${HADOOP_YARN_HOME}/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
 fi




svn commit: r1390222 - in /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common: CHANGES.txt src/main/bin/hadoop-config.sh src/main/bin/start-all.sh

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 23:44:33 2012
New Revision: 1390222

URL: http://svn.apache.org/viewvc?rev=1390222view=rev
Log:
Merge -c 1390218 from trunk to branch-2 to fix HADOOP-8794. Rename YARN_HOME to 
HADOOP_YARN_HOME. Contributed by Vinod K V.

Modified:

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1390222r1=1390221r2=1390222view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
Tue Sep 25 23:44:33 2012
@@ -51,6 +51,8 @@ Release 2.0.2-alpha - 2012-09-07 
 HADOOP-8689. Make trash a server side configuration option. (eli)
 
 HADOOP-8710. Remove ability for users to easily run the trash emptier. 
(eli)
+
+HADOOP-8794. Rename YARN_HOME to HADOOP_YARN_HOME. (vinodkv via acmurthy)
 
   NEW FEATURES
  

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh?rev=1390222r1=1390221r2=1390222view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 (original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 Tue Sep 25 23:44:33 2012
@@ -268,21 +268,21 @@ fi
 CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/$HDFS_DIR'/*'
 
 # put yarn in classpath if present
-if [ $YARN_HOME =  ]; then
+if [ $HADOOP_YARN_HOME =  ]; then
   if [ -d ${HADOOP_PREFIX}/$YARN_DIR ]; then
-export YARN_HOME=$HADOOP_PREFIX
+export HADOOP_YARN_HOME=$HADOOP_PREFIX
   fi
 fi
 
-if [ -d $YARN_HOME/$YARN_DIR/webapps ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_DIR
+if [ -d $HADOOP_YARN_HOME/$YARN_DIR/webapps ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR
 fi
 
-if [ -d $YARN_HOME/$YARN_LIB_JARS_DIR ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_LIB_JARS_DIR'/*'
+if [ -d $HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR'/*'
 fi
 
-CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_DIR'/*'
+CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR'/*'
 
 # put mapred in classpath if present AND different from YARN
 if [ $HADOOP_MAPRED_HOME =  ]; then
@@ -291,7 +291,7 @@ if [ $HADOOP_MAPRED_HOME =  ]; then
   fi
 fi
 
-if [ $HADOOP_MAPRED_HOME/$MAPRED_DIR != $YARN_HOME/$YARN_DIR ] ; then
+if [ $HADOOP_MAPRED_HOME/$MAPRED_DIR != $HADOOP_YARN_HOME/$YARN_DIR ] ; 
then
   if [ -d $HADOOP_MAPRED_HOME/$MAPRED_DIR/webapps ]; then
 CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/$MAPRED_DIR
   fi

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh?rev=1390222r1=1390221r2=1390222view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
 (original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
 Tue Sep 25 23:44:33 2012
@@ -33,6 +33,6 @@ if [ -f ${HADOOP_HDFS_HOME}/sbin/start
 fi
 
 # start yarn daemons if yarn is present
-if [ -f ${YARN_HOME}/sbin/start-yarn.sh ]; then
-  ${YARN_HOME}/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
+if [ -f ${HADOOP_YARN_HOME}/sbin/start-yarn.sh ]; then
+  ${HADOOP_YARN_HOME}/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
 fi




svn commit: r1390223 - in /hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common: CHANGES.txt src/main/bin/hadoop-config.sh src/main/bin/start-all.sh

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Tue Sep 25 23:44:45 2012
New Revision: 1390223

URL: http://svn.apache.org/viewvc?rev=1390223view=rev
Log:
Merge -c 1390218 from trunk to branch-2.0.2-alpha to fix HADOOP-8794. Rename 
YARN_HOME to HADOOP_YARN_HOME. Contributed by Vinod K V.

Modified:

hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh

Modified: 
hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1390223r1=1390222r2=1390223view=diff
==
--- 
hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/CHANGES.txt
 (original)
+++ 
hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/CHANGES.txt
 Tue Sep 25 23:44:45 2012
@@ -10,6 +10,8 @@ Release 2.0.2-alpha - 2012-09-07 
 HADOOP-8689. Make trash a server side configuration option. (eli)
 
 HADOOP-8710. Remove ability for users to easily run the trash emptier. 
(eli)
+
+HADOOP-8794. Rename YARN_HOME to HADOOP_YARN_HOME. (vinodkv via acmurthy)
 
   NEW FEATURES
 

Modified: 
hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh?rev=1390223r1=1390222r2=1390223view=diff
==
--- 
hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 (original)
+++ 
hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 Tue Sep 25 23:44:45 2012
@@ -267,21 +267,21 @@ fi
 CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/$HDFS_DIR'/*'
 
 # put yarn in classpath if present
-if [ $YARN_HOME =  ]; then
+if [ $HADOOP_YARN_HOME =  ]; then
   if [ -d ${HADOOP_PREFIX}/$YARN_DIR ]; then
-export YARN_HOME=$HADOOP_PREFIX
+export HADOOP_YARN_HOME=$HADOOP_PREFIX
   fi
 fi
 
-if [ -d $YARN_HOME/$YARN_DIR/webapps ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_DIR
+if [ -d $HADOOP_YARN_HOME/$YARN_DIR/webapps ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR
 fi
 
-if [ -d $YARN_HOME/$YARN_LIB_JARS_DIR ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_LIB_JARS_DIR'/*'
+if [ -d $HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR'/*'
 fi
 
-CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_DIR'/*'
+CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR'/*'
 
 # put mapred in classpath if present AND different from YARN
 if [ $HADOOP_MAPRED_HOME =  ]; then
@@ -290,7 +290,7 @@ if [ $HADOOP_MAPRED_HOME =  ]; then
   fi
 fi
 
-if [ $HADOOP_MAPRED_HOME/$MAPRED_DIR != $YARN_HOME/$YARN_DIR ] ; then
+if [ $HADOOP_MAPRED_HOME/$MAPRED_DIR != $HADOOP_YARN_HOME/$YARN_DIR ] ; 
then
   if [ -d $HADOOP_MAPRED_HOME/$MAPRED_DIR/webapps ]; then
 CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/$MAPRED_DIR
   fi

Modified: 
hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh?rev=1390223r1=1390222r2=1390223view=diff
==
--- 
hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
 (original)
+++ 
hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
 Tue Sep 25 23:44:45 2012
@@ -33,6 +33,6 @@ if [ -f ${HADOOP_HDFS_HOME}/sbin/start
 fi
 
 # start yarn daemons if yarn is present
-if [ -f ${YARN_HOME}/sbin/start-yarn.sh ]; then
-  ${YARN_HOME}/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
+if [ -f ${HADOOP_YARN_HOME}/sbin/start-yarn.sh ]; then
+  ${HADOOP_YARN_HOME}/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
 fi




svn commit: r1390230 [1/2] - /hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Wed Sep 26 00:07:21 2012
New Revision: 1390230

URL: http://svn.apache.org/viewvc?rev=1390230view=rev
Log:
Added release notes for 2.0.2-alpha.

Modified:

hadoop/common/branches/branch-2.0.2-alpha/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html



svn commit: r1390231 - /hadoop/common/tags/release-2.0.2-alpha-rc1/

2012-09-25 Thread acmurthy
Author: acmurthy
Date: Wed Sep 26 00:07:59 2012
New Revision: 1390231

URL: http://svn.apache.org/viewvc?rev=1390231view=rev
Log:
Preparing for hadoop-2.0.2-alpha release.

Added:
hadoop/common/tags/release-2.0.2-alpha-rc1/   (props changed)
  - copied from r1390230, hadoop/common/branches/branch-2.0.2-alpha/

Propchange: hadoop/common/tags/release-2.0.2-alpha-rc1/
--
--- svn:ignore (added)
+++ svn:ignore Wed Sep 26 00:07:59 2012
@@ -0,0 +1,5 @@
+.classpath
+.git
+.project
+.settings
+target

Propchange: hadoop/common/tags/release-2.0.2-alpha-rc1/
--
--- svn:mergeinfo (added)
+++ svn:mergeinfo Wed Sep 26 00:07:59 2012
@@ -0,0 +1,3 @@
+/hadoop/common/branches/HDFS-3042:1306184-1342109
+/hadoop/common/branches/branch-0.23-PB:1227776-1294021
+/hadoop/common/trunk:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163490,1163768,1163852,1163858,1163981,1164255,1164301,1164339,1166009,1166402,1167001,1167383,1167662,1170085,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189613,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1239752,1240897,1240928,1243065,1243104,1244766,1245751,1245762,1293419,1295061,1295227,1296556,1298044,1298696,1298700,1299045,1299434,1299963,1301308,1301312,1301820,1301871,1302624,
 
1302704-1302705,1303474,1304063,1304099,1304112,1304118,1305230,1309625,1310185,1311556,1312029,1329319,1333557,1334216,1342112,1348207,1349124,1349616,1351818,1361813,1373683,1374696,1375450,1375829,1379646




[Hadoop Wiki] Update of HowToContribute by Arun C Murthy

2012-09-25 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The HowToContribute page has been changed by Arun C Murthy:
http://wiki.apache.org/hadoop/HowToContribute?action=diffrev1=76rev2=77

  
  Unit tests development guidelines HowToDevelopUnitTests
  
+  Compiling 'classic' MapReduce or MR1 
+ 
+ Please ensure you don't break the 'classic' MR1 tests, which aren't yet 
+ mavenized, by running:
+ 
+ {{{
+  $ mvn install
+  $ cd hadoop-mapreduce-project
+  $ ant veryclean all-jars -Dresolvers=internal
+ }}}
+ 
   Javadoc 
  Please also check the javadoc.