[Hadoop Wiki] Update of HCFS/Progress by SteveWatt

2013-06-10 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The HCFS/Progress page has been changed by SteveWatt:
https://wiki.apache.org/hadoop/HCFS/Progress?action=diff&rev1=9&rev2=10

  Next Meeting  
  
  June 10th 9AM PST 
- Google Hangout - 
https://plus.google.com/hangouts/_/559485d5ccc40e89e9b3049d8d9c4d01eb71d0ca?authuser=0hl=en
+ Google Hangout - 
https://plus.google.com/hangouts/_/6b3ac2351f2a794717677be7c29a21599c0433ab?authuser=0&hl=en
  
  Agenda:
  
@@ -51, +51 @@

  * several google hangouts and a workshop to discuss the topics
  
  The following parties responded that they were interested in participation:
- - mbhandar...@gopivotal.com, shv.had...@gmail.com, ste...@hortonworks.com, 
erlv5...@gmail.com, shaposh...@gmail.com, apurt...@apache.org, 
cdoug...@apache.org
+ - mbhandar...@gopivotal.com, shv.had...@gmail.com, ste...@hortonworks.com, 
erlv5...@gmail.com, shaposh...@gmail.com, apurt...@apache.org, 
cdoug...@apache.org, jayh...@cs.ucsc.edu, san...@hortonworks.com
  


svn commit: r1491535 - in /hadoop/common/branches/branch-1.2: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java

2013-06-10 Thread suresh
Author: suresh
Date: Mon Jun 10 17:42:05 2013
New Revision: 1491535

URL: http://svn.apache.org/r1491535
Log:
HDFS-4581. Merge r1461615 from branch-1

Modified:
hadoop/common/branches/branch-1.2/CHANGES.txt

hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java

Modified: hadoop/common/branches/branch-1.2/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.2/CHANGES.txt?rev=1491535r1=1491534r2=1491535view=diff
==
--- hadoop/common/branches/branch-1.2/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.2/CHANGES.txt Mon Jun 10 17:42:05 2013
@@ -20,6 +20,9 @@ Release 1.2.1 - Unreleased 
 MAPREDUCE-3859. Fix CapacityScheduler to correctly compute runtime queue
 limits for high-ram jobs. (Sergey Tryuber via acmurthy)
 
+HDFS-4581. DataNode.checkDiskError should not be called on network errors.
+(Rohit Kochar via kihwal)
+
 Release 1.2.0 - 2013.05.05
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1491535r1=1491534r2=1491535view=diff
==
--- 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 (original)
+++ 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 Mon Jun 10 17:42:05 2013
@@ -27,8 +27,10 @@ import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
+import java.net.SocketException;
 import java.net.SocketTimeoutException;
 import java.net.UnknownHostException;
+import java.nio.channels.ClosedByInterruptException;
 import java.nio.channels.ServerSocketChannel;
 import java.nio.channels.SocketChannel;
 import java.security.NoSuchAlgorithmException;
@@ -923,10 +925,16 @@ public class DataNode extends Configured
   /** Check if there is no space in disk 
*  @param e that caused this checkDiskError call
**/
-  protected void checkDiskError(Exception e ) throws IOException {
-
-LOG.warn(checkDiskError: exception: , e);
-
+  protected void checkDiskError(Exception e ) throws IOException {
+LOG.warn(checkDiskError: exception: , e);  
+if (e instanceof SocketException || e instanceof SocketTimeoutException
+ || e instanceof ClosedByInterruptException 
+ || e.getMessage().startsWith(Broken pipe)) {
+  LOG.info(Not checking disk as checkDiskError was called on a network +
+ related exception); 
+  return;
+}
+
 if (e.getMessage() != null 
 e.getMessage().startsWith(No space left on device)) {
   throw new DiskOutOfSpaceException(No space left on device);
@@ -1543,8 +1551,11 @@ public class DataNode extends Configured
 LOG.warn(dnRegistration + :Failed to transfer  + b +  to  + 
targets[0].getName()
 +  got  + StringUtils.stringifyException(ie));
 // check if there are any disk problem
-datanode.checkDiskError();
-
+try{
+  checkDiskError(ie);
+} catch(IOException e) {
+  LOG.warn(DataNode.checkDiskError failed in run() with: , e);
+}
   } finally {
 xmitsInProgress.getAndDecrement();
 IOUtils.closeStream(blockSender);




svn commit: r1491539 - in /hadoop/common/branches/branch-1.2: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java

2013-06-10 Thread suresh
Author: suresh
Date: Mon Jun 10 17:49:26 2013
New Revision: 1491539

URL: http://svn.apache.org/r1491539
Log:
HDFS-4699. Merge r1469843 from branch-1

Modified:
hadoop/common/branches/branch-1.2/CHANGES.txt

hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java

Modified: hadoop/common/branches/branch-1.2/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.2/CHANGES.txt?rev=1491539r1=1491538r2=1491539view=diff
==
--- hadoop/common/branches/branch-1.2/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.2/CHANGES.txt Mon Jun 10 17:49:26 2013
@@ -23,6 +23,9 @@ Release 1.2.1 - Unreleased 
 HDFS-4581. DataNode.checkDiskError should not be called on network errors.
 (Rohit Kochar via kihwal)
 
+HDFS-4699. Additional conditions for avoiding unnecessary 
+DataNode.checkDiskError calls. (Chris Nauroth via kihwal)
+
 Release 1.2.0 - 2013.05.05
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1491539r1=1491538r2=1491539view=diff
==
--- 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 (original)
+++ 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 Mon Jun 10 17:49:26 2013
@@ -929,7 +929,10 @@ public class DataNode extends Configured
 LOG.warn(checkDiskError: exception: , e);  
 if (e instanceof SocketException || e instanceof SocketTimeoutException
  || e instanceof ClosedByInterruptException 
- || e.getMessage().startsWith(Broken pipe)) {
+ || e.getMessage().startsWith(An established connection was aborted)
+ || e.getMessage().startsWith(Broken pipe)
+ || e.getMessage().startsWith(Connection reset)
+ || e.getMessage().contains(java.nio.channels.SocketChannel)) {
   LOG.info(Not checking disk as checkDiskError was called on a network +
  related exception); 
   return;




svn commit: r1491541 - in /hadoop/common/branches/branch-1.2: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java

2013-06-10 Thread suresh
Author: suresh
Date: Mon Jun 10 17:52:31 2013
New Revision: 1491541

URL: http://svn.apache.org/r1491541
Log:
HDFS-4880. Merge r1490758 from branch-1

Modified:
hadoop/common/branches/branch-1.2/CHANGES.txt

hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java

Modified: hadoop/common/branches/branch-1.2/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.2/CHANGES.txt?rev=1491541r1=1491540r2=1491541view=diff
==
--- hadoop/common/branches/branch-1.2/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.2/CHANGES.txt Mon Jun 10 17:52:31 2013
@@ -8,6 +8,9 @@ Release 1.2.1 - Unreleased 
 
   IMPROVEMENTS
 
+HDFS-4880. Print the image and edits file loaded by the namenode in the
+logs. (Arpit Agarwal via suresh)
+
   BUG FIXES
 
 MAPREDUCE-5206. Ensure that a job doesn't get added to RetiredJobs

Modified: 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1491541r1=1491540r2=1491541view=diff
==
--- 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 (original)
+++ 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 Mon Jun 10 17:52:31 2013
@@ -637,6 +637,8 @@ public class FSEditLog {
 long highestGenStamp = -1;
 long startTime = FSNamesystem.now();
 
+LOG.info(Start loading edits file  + edits.getName());
+//
 // Keep track of the file offsets of the last several opcodes.
 // This is handy when manually recovering corrupted edits files.
 PositionTrackingInputStream tracker = 

Modified: 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1491541r1=1491540r2=1491541view=diff
==
--- 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 (original)
+++ 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 Mon Jun 10 17:52:31 2013
@@ -823,14 +823,17 @@ public class FSImage extends Storage {
 needToSave |= recoverInterruptedCheckpoint(latestNameSD, latestEditsSD);
 
 long startTime = FSNamesystem.now();
-long imageSize = getImageFile(latestNameSD, NameNodeFile.IMAGE).length();
+File imageFile = getImageFile(latestNameSD, NameNodeFile.IMAGE);
+long imageSize = imageFile.length();
 
 //
 // Load in bits
 //
 latestNameSD.read();
-needToSave |= loadFSImage(getImageFile(latestNameSD, NameNodeFile.IMAGE));
-LOG.info(Image file of size  + imageSize +  loaded in  
+LOG.info(Start loading image file  + imageFile.getPath().toString());
+needToSave |= loadFSImage(imageFile);
+LOG.info(Image file  + imageFile.getPath().toString() +
+ of size  + imageSize +  bytes loaded in 
 + (FSNamesystem.now() - startTime)/1000 +  seconds.);
 
 // Load latest edits
@@ -1067,8 +1070,9 @@ public class FSImage extends Storage {
   out.close();
 }
 
-LOG.info(Image file of size  + newFile.length() +  saved in  
-+ (FSNamesystem.now() - startTime)/1000 +  seconds.);
+LOG.info(Image file  + newFile +  of size  + newFile.length() +
+ bytes saved in  + (FSNamesystem.now() - startTime)/1000 +
+ seconds.);
   }
 
   /**




svn commit: r1491544 - in /hadoop/common/branches/branch-1.2: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancerWith

2013-06-10 Thread suresh
Author: suresh
Date: Mon Jun 10 17:59:29 2013
New Revision: 1491544

URL: http://svn.apache.org/r1491544
Log:
HDFS-4261. Merge r1488865 from branch-1

Modified:
hadoop/common/branches/branch-1.2/CHANGES.txt

hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java

hadoop/common/branches/branch-1.2/src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java

Modified: hadoop/common/branches/branch-1.2/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.2/CHANGES.txt?rev=1491544r1=1491543r2=1491544view=diff
==
--- hadoop/common/branches/branch-1.2/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.2/CHANGES.txt Mon Jun 10 17:59:29 2013
@@ -29,6 +29,9 @@ Release 1.2.1 - Unreleased 
 HDFS-4699. Additional conditions for avoiding unnecessary 
 DataNode.checkDiskError calls. (Chris Nauroth via kihwal)
 
+HDFS-4261. Fix bugs in Balancer causing infinite loop and
+TestBalancerWithNodeGroup timing out.  (Junping Du via szetszwo)
+
 Release 1.2.0 - 2013.05.05
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1491544r1=1491543r2=1491544view=diff
==
--- 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 (original)
+++ 
hadoop/common/branches/branch-1.2/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 Mon Jun 10 17:59:29 2013
@@ -193,6 +193,8 @@ public class Balancer implements Tool {
*/
   public static final int MAX_NUM_CONCURRENT_MOVES = 5;
   
+  public static final int MAX_NO_PENDING_BLOCK_ITERATIONS = 5;
+  
   private Configuration conf;
 
   private double threshold = 10D;
@@ -746,6 +748,7 @@ public class Balancer implements Tool {
   long startTime = Util.now();
   this.blocksToReceive = 2*scheduledSize;
   boolean isTimeUp = false;
+  int noPendingBlockIteration = 0;
   while(!isTimeUp  scheduledSize  0 
   (!srcBlockList.isEmpty() || blocksToReceive  0)) {
 PendingBlockMove pendingBlock = chooseNextBlockToMove();
@@ -769,7 +772,15 @@ public class Balancer implements Tool {
 LOG.warn(StringUtils.stringifyException(e));
 return;
   }
-} 
+} else {
+  // source node cannot find a pendingBlockToMove, iteration +1
+  noPendingBlockIteration++;
+  // in case no blocks can be moved for source node's task,
+  // jump out of while-loop after 5 iterations.
+  if (noPendingBlockIteration = MAX_NO_PENDING_BLOCK_ITERATIONS) {
+scheduledSize = 0;
+  }
+}
 
 // check if time is up or not
 if (Util.now()-startTime  MAX_ITERATION_TIME) {
@@ -1496,7 +1507,11 @@ public class Balancer implements Tool {
   Formatter formatter = new Formatter(System.out);
   System.out.println(Time Stamp   Iteration#  Bytes Already 
Moved  Bytes Left To Move  Bytes Being Moved);
   int iterations = 0;
+  
   while (true) {
+// clean all lists at the beginning of balancer iteration.
+resetData();
+
 /* get all live datanodes of a cluster and their disk usage
  * decide the number of bytes need to be moved
  */
@@ -1547,9 +1562,6 @@ public class Balancer implements Tool {
 return NO_MOVE_PROGRESS;
   }
 }
-
-// clean all lists
-resetData();
 
 try {
   Thread.sleep(2*conf.getLong(dfs.heartbeat.interval, 3));

Modified: 
hadoop/common/branches/branch-1.2/src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.2/src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java?rev=1491544r1=1491543r2=1491544view=diff
==
--- 
hadoop/common/branches/branch-1.2/src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
 (original)
+++ 
hadoop/common/branches/branch-1.2/src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
 Mon Jun 10 17:59:29 2013
@@ -216,7 +216,7 @@ public class TestBalancerWithNodeGroup {
* to n0 or n1 as balancer policy with node group. Thus, we expect the 
balancer
* to end in 5 iterations without move block process.
*/
-  @Test
+  @Test(timeout=6)
   public void testBalancerEndInNoMoveProgress() throws Exception {
 Configuration conf = createConf();
 long[] capacities = new long[]{CAPACITY, CAPACITY, CAPACITY, CAPACITY};
@@ -255,7 

svn commit: r1491545 - /hadoop/common/branches/branch-1/CHANGES.txt

2013-06-10 Thread suresh
Author: suresh
Date: Mon Jun 10 18:00:12 2013
New Revision: 1491545

URL: http://svn.apache.org/r1491545
Log:
Move HDFS-4581, HDFS-4699, HDFS-4261 and HDFS-4880 to release 1.2.1 section

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1491545r1=1491544r2=1491545view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Mon Jun 10 18:00:12 2013
@@ -14,9 +14,6 @@ Release 1.3.0 - unreleased
 HADOOP-9573. Fix test-patch script to work with the enhanced
 PreCommit-Admin script.(Giridharan Kesavan)
 
-HDFS-4880. Print the image and edits file loaded by the namenode in the
-logs. (Arpit Agarwal via suresh)
-
   BUG FIXES
 
 MAPREDUCE-5047. keep.failed.task.files=true causes job failure on 
@@ -37,18 +34,12 @@ Release 1.3.0 - unreleased
 HDFS-4622. Remove redundant synchronized from 
 FSNamesystem#rollEditLog in branch-1. (Jing Zhao via suresh)
 
-HDFS-4581. DataNode.checkDiskError should not be called on network errors.
-(Rohit Kochar via kihwal)
-
 MAPREDUCE-2817. MiniRMCluster hardcodes 'mapred.local.dir' configuration 
 to 'build/test/mapred/local'. (rkanter via tucu)
 
 MAPREDUCE-5133. TestSubmitJob.testSecureJobExecution is flaky due to job 
 dir deletion race. (sandyr via tucu)
 
-HDFS-4699. Additional conditions for avoiding unnecessary 
-DataNode.checkDiskError calls. (Chris Nauroth via kihwal)
-
 MAPREDUCE-5218. Annotate (comment) internal classes as Private. 
 (kkambatl via tucu)
 
@@ -61,9 +52,6 @@ Release 1.3.0 - unreleased
 HADOOP-8981. TestMetricsSystemImpl fails on Windows. (Xuan Gong, backported
 by Chris Nauroth via suresh)
 
-HDFS-4261. Fix bugs in Balaner causing infinite loop and
-TestBalancerWithNodeGroup timeing out.  (Junping Du via szetszwo)
-
 MAPREDUCE-5250. Searching for ';' in JobTracker History throws 
 ArrayOutOfBoundException. (kkambatl via tucu)
 
@@ -75,6 +63,9 @@ Release 1.2.1 - Unreleased 
 
   IMPROVEMENTS
 
+HDFS-4880. Print the image and edits file loaded by the namenode in the
+logs. (Arpit Agarwal via suresh)
+
   BUG FIXES
 
 MAPREDUCE-5206. Ensure that a job doesn't get added to RetiredJobs
@@ -87,6 +78,15 @@ Release 1.2.1 - Unreleased 
 MAPREDUCE-3859. Fix CapacityScheduler to correctly compute runtime queue
 limits for high-ram jobs. (Sergey Tryuber via acmurthy)
 
+HDFS-4581. DataNode.checkDiskError should not be called on network errors.
+(Rohit Kochar via kihwal)
+
+HDFS-4699. Additional conditions for avoiding unnecessary 
+DataNode.checkDiskError calls. (Chris Nauroth via kihwal)
+
+HDFS-4261. Fix bugs in Balancer causing infinite loop and
+TestBalancerWithNodeGroup timing out.  (Junping Du via szetszwo)
+
 Release 1.2.0 - 2013.05.05
 
   INCOMPATIBLE CHANGES




svn commit: r1491548 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: CHANGES.txt src/main/bin/hadoop-config.sh

2013-06-10 Thread jlowe
Author: jlowe
Date: Mon Jun 10 18:12:36 2013
New Revision: 1491548

URL: http://svn.apache.org/r1491548
Log:
HADOOP-9581. hadoop --config non-existent directory should result in error. 
Contributed by Ashwin Shankar

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1491548r1=1491547r2=1491548view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Mon Jun 
10 18:12:36 2013
@@ -571,6 +571,9 @@ Release 2.1.0-beta - UNRELEASED
 
 HADOOP-9605. Update junit dependency. (Timothy St. Clair via cos)
 
+HADOOP-9581. hadoop --config non-existent directory should result in error
+(Ashwin Shankar via jlowe)
+
   BREAKDOWN OF HADOOP-8562 SUBTASKS AND RELATED JIRAS
 
 HADOOP-8924. Hadoop Common creating package-info.java must not depend on

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh?rev=1491548r1=1491547r2=1491548view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 Mon Jun 10 18:12:36 2013
@@ -60,6 +60,10 @@ then
  then
  shift
  confdir=$1
+ if [ ! -d $confdir ]; then
+echo Error: Cannot find configuration directory: $confdir
+exit 1
+ fi
  shift
  HADOOP_CONF_DIR=$confdir
 fi




svn commit: r1491549 - in /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common: CHANGES.txt src/main/bin/hadoop-config.sh

2013-06-10 Thread jlowe
Author: jlowe
Date: Mon Jun 10 18:14:16 2013
New Revision: 1491549

URL: http://svn.apache.org/r1491549
Log:
svn merge -c 1491548 FIXES: HADOOP-9581. hadoop --config non-existent directory 
should result in error. Contributed by Ashwin Shankar

Modified:

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1491549r1=1491548r2=1491549view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
Mon Jun 10 18:14:16 2013
@@ -246,6 +246,9 @@ Release 2.1.0-beta - UNRELEASED
 
 HADOOP-9605. Update junit dependency. (Timothy St. Clair via cos)
 
+HADOOP-9581. hadoop --config non-existent directory should result in error
+(Ashwin Shankar via jlowe)
+
   BREAKDOWN OF HADOOP-8562 SUBTASKS AND RELATED JIRAS
 
 HADOOP-8924. Hadoop Common creating package-info.java must not depend on

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh?rev=1491549r1=1491548r2=1491549view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 (original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 Mon Jun 10 18:14:16 2013
@@ -60,6 +60,10 @@ then
  then
  shift
  confdir=$1
+ if [ ! -d $confdir ]; then
+echo Error: Cannot find configuration directory: $confdir
+exit 1
+ fi
  shift
  HADOOP_CONF_DIR=$confdir
 fi




svn commit: r1491550 - in /hadoop/common/branches/branch-2.1-beta/hadoop-common-project/hadoop-common: CHANGES.txt src/main/bin/hadoop-config.sh

2013-06-10 Thread jlowe
Author: jlowe
Date: Mon Jun 10 18:14:27 2013
New Revision: 1491550

URL: http://svn.apache.org/r1491550
Log:
svn merge -c 1491548 FIXES: HADOOP-9581. hadoop --config non-existent directory 
should result in error. Contributed by Ashwin Shankar

Modified:

hadoop/common/branches/branch-2.1-beta/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-2.1-beta/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

Modified: 
hadoop/common/branches/branch-2.1-beta/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1491550r1=1491549r2=1491550view=diff
==
--- 
hadoop/common/branches/branch-2.1-beta/hadoop-common-project/hadoop-common/CHANGES.txt
 (original)
+++ 
hadoop/common/branches/branch-2.1-beta/hadoop-common-project/hadoop-common/CHANGES.txt
 Mon Jun 10 18:14:27 2013
@@ -222,6 +222,9 @@ Release 2.1.0-beta - UNRELEASED
 
 HADOOP-9605. Update junit dependency. (Timothy St. Clair via cos)
 
+HADOOP-9581. hadoop --config non-existent directory should result in error
+(Ashwin Shankar via jlowe)
+
   BREAKDOWN OF HADOOP-8562 SUBTASKS AND RELATED JIRAS
 
 HADOOP-8924. Hadoop Common creating package-info.java must not depend on

Modified: 
hadoop/common/branches/branch-2.1-beta/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh?rev=1491550r1=1491549r2=1491550view=diff
==
--- 
hadoop/common/branches/branch-2.1-beta/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 (original)
+++ 
hadoop/common/branches/branch-2.1-beta/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 Mon Jun 10 18:14:27 2013
@@ -60,6 +60,10 @@ then
  then
  shift
  confdir=$1
+ if [ ! -d $confdir ]; then
+echo Error: Cannot find configuration directory: $confdir
+exit 1
+ fi
  shift
  HADOOP_CONF_DIR=$confdir
 fi




svn commit: r1491554 - in /hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common: CHANGES.txt src/main/bin/hadoop-config.sh

2013-06-10 Thread jlowe
Author: jlowe
Date: Mon Jun 10 18:20:17 2013
New Revision: 1491554

URL: http://svn.apache.org/r1491554
Log:
svn merge -c 1491548 FIXES: HADOOP-9581. hadoop --config non-existent directory 
should result in error. Contributed by Ashwin Shankar

Modified:

hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

Modified: 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1491554r1=1491553r2=1491554view=diff
==
--- 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt
 (original)
+++ 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt
 Mon Jun 10 18:20:17 2013
@@ -12,6 +12,9 @@ Release 0.23.9 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-9581. hadoop --config non-existent directory should result in error
+(Ashwin Shankar via jlowe)
+
 Release 0.23.8 - 2013-06-05
   
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh?rev=1491554r1=1491553r2=1491554view=diff
==
--- 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 (original)
+++ 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
 Mon Jun 10 18:20:17 2013
@@ -60,6 +60,10 @@ then
  then
  shift
  confdir=$1
+ if [ ! -d $confdir ]; then
+echo Error: Cannot find configuration directory: $confdir
+exit 1
+ fi
  shift
  HADOOP_CONF_DIR=$confdir
 fi




[Hadoop Wiki] Update of HCFS/Progress by SteveWatt

2013-06-10 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The HCFS/Progress page has been changed by SteveWatt:
https://wiki.apache.org/hadoop/HCFS/Progress?action=diff&rev1=10&rev2=11

- '''Proposal for organizing the work'''
+ '''HCFS Workstream Definition'''
  
- If our eventual goal is to create a test suite that comprehensively vets 
adherence of a given Hadoop FileSystem implementation to the FileSystem 
contract and the expected behavior of its operations, then I think we need to 
define what comprehensive means. We should also decide on whether these tests 
are going to be against the FileSystem 1.0 or 2.0 interface.  
+ As agreed to by June 10th Meeting participants: 
  
- My proposal is to organize the work into the following activities:
+ * Focus on Hadoop 2.0 FS Interface. If possible, create a work stream that 
would allow testing and validation of the FS 1.0 Interface.
  
  * An audit of the Hadoop FileSystem 1.0 Test Coverage - 
[[https://wiki.apache.org/hadoop/HCFS/FileSystem-1.0-Tests | I have already 
created a first pass at this]]
  
@@ -12, +12 @@

  
  * An audit of the new Hadoop FS Tests added by Steve Loughran for his 
[[https://issues.apache.org/jira/browse/HADOOP-8545 | Hadoop FS Plugin for 
SWIFT]]
  
- * A document codifying the expected semantics/behavior of the FileSystem 2.0 
Operations - [[ https://issues.apache.org/jira/browse/HADOOP-9371 | Steve 
Loughran has started this already]]
+ * Create JavaDocs reflecting a FileSystem 2.0 Spec that codifies the 
expected semantics/behavior of the FileSystem 2.0 Operations and all the FS 
operations - [[ https://issues.apache.org/jira/browse/HADOOP-9371 | Steve 
Loughran has started this already]]
  
- * A gap analysis that examines the FileSystem 2.0 Class, the expected 
behavior of the Operations and the respective Test Coverage available.
+ * Create a gap analysis that examines the FileSystem 2.0 Class, the expected 
behavior of the Operations and the respective Test Coverage available.
  
  * Create tests to fill in the gaps
+   
+ - Create a workstream to identify if Object/Blob stores have unique 
properties that make them a special case for Test Coverage as a Hadoop FS. 
Create a strategy for handling Object/Block Stores.
+ 
+ * Validation that a given Hadoop FileSystem implementation is compatible 
would involve:
+ 
+  - Functional Validation: Successfully passing the test library that will 
be created (described above) 
+ 
+  - Ecosystem Validation: Successfully passing the Hadoop Integration 
Tests from Apache BigTop
+ 
  
  
  Next Meeting  
  
- June 10th 9AM PST 
- Google Hangout - 
https://plus.google.com/hangouts/_/6b3ac2351f2a794717677be7c29a21599c0433ab?authuser=0&hl=en
+ '''June 25th''' at Red Hat in Mountain View. The day before Hadoop Summit. 
More details to follow.
+ 
+ 
+ ''Work thus far'' 
+ 
+ 
+ '''June 10th''' 9AM PST via Google Hangout
+ 
+ Attendees: Tim St. Clair, Matt Farrellee, Steve Watt, Jay Vyas, Steve 
Loughran, Sanjay Radia, Andrew Purtell, Joe Buck, Roman Shaposhnik, Nathan (?)
  
  Agenda:
  
@@ -32, +48 @@

  
  - Discussion on where people would like to participate
  
+ Outcome:
+ 
+ - Validation of the current goals, plus the addition of:
+   
+   * Leveraging BigTop in order to make use of the Hadoop Ecosystem Tests as 
additional Test Coverage for a FileSystem Implementation
+   
+   * Create a workstream to identify if Object/Blob stores have unique 
properties that make them a special case for Test Coverage as a Hadoop FS. 
Create a strategy for handling Object/Block Stores.
+ 
+   * Focus tests against the AbstractFileSystem class rather than the 
FileSystem Class (which is an abstract class). Yes, this can be confusing.
+ 
+   * Create a Hadoop 2.0 FileSystem Interface Specification for developers 
creating plugins as well as additional background for interested users. This 
should be created as a JavaDoc and managed in JIRA so that it supports proper 
governance.
+ 
+ The workstream definition at the top of this page has been updated to reflect 
the new additions to the initiative.
+ 
  
+ '''June 4th'''
- ''Work thus far'' 
- 
- June 4th 
  
  Created a [[https://github.com/wattsteve/HCFS/blob/master/jdiff/Report.txt | 
diff report]] contrasting Hadoop FileSystem 1.0 and 2.0
  
  Next step is to evaluate how comprehensive the unit test case coverage is for 
FileSystem 1.0 and 2.0. This is a work in progress 
[[https://wiki.apache.org/hadoop/HCFS/FileSystem-1.0-Tests | Audit of the 
FileSystem 1.0 Test Library ]]
  
  
- May 23rd - A broader call for participation was made to the hadoop-core dev 
proposing:
+ '''May 23rd''' - A broader call for participation was made to the hadoop-core 
dev proposing:
  
  * broader participation in [[ 
https://issues.apache.org/jira/browse/HADOOP-9371 | defining the expected 
behavior of Hadoop FileSystem operations]]
  


[Hadoop Wiki] Update of HCFS/Progress by SteveWatt

2013-06-10 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The HCFS/Progress page has been changed by SteveWatt:
https://wiki.apache.org/hadoop/HCFS/Progress?action=diffrev1=11rev2=12

- '''HCFS Workstream Definition'''
+ '''Hadoop FileSystem Validation Workstream'''
  
- As agreed to by June 10th Meeting participants: 
+ Hadoop has a pluggable FileSystem Architecture. 3rd party FileSystems can be 
enabled for Hadoop by developing a plugin that mediates between the Hadoop 
FileSystem Interface and the interface of the 3rd Party FileSystem. For those 
developing a Hadoop FileSystem plugin, there is no comprehensive test library 
to validate that their plugin creates a Hadoop FileSystem implementation that 
is Hadoop compatible.  
  
- * Focus on Hadoop 2.0 FS Interface. If possible, create a work stream that 
would allow testing and validation of the FS 1.0 Interface.
+ What do we mean by comprehensive? We mean that there is a test for every 
single operation in the FS Interface that properly tests the expected behavior 
of the operation given the full variability of its parameters. To create a 
comprehensive test library, we plan to do the following:
  
- * An audit of the Hadoop FileSystem 1.0 Test Coverage - 
[[https://wiki.apache.org/hadoop/HCFS/FileSystem-1.0-Tests | I have already 
created a first pass at this]]
+ * Focus on the Hadoop 2.0 FS Interface. If possible, create a work stream 
that would allow testing and validation of the FS 1.0 Interface also.
  
- * An audit of the Hadoop FileSystem 2.0 Test Coverage
+ * Undertake an audit of the Hadoop FileSystem 1.0 Test Coverage - 
[[https://wiki.apache.org/hadoop/HCFS/FileSystem-1.0-Tests | Steve Watt has 
already created a first pass at this. Feel free to improve it.]]
  
- * An audit of the new Hadoop FS Tests added by Steve Loughran for his 
[[https://issues.apache.org/jira/browse/HADOOP-8545 | Hadoop FS Plugin for 
SWIFT]]
+ * Undertake an audit of the Hadoop FileSystem 2.0 Test Coverage
  
- * Create JavaDocs reflecting a FileSystem 2.0 Spec that codifying the 
expected semantics/behavior of the FileSystem 2.0 Operations and all the FS 
operations - [[ https://issues.apache.org/jira/browse/HADOOP-9371 | Steve 
Loughran has started this already]]
+ - This includes an audit of the new Hadoop FS Tests added by Steve 
Loughran for his [[https://issues.apache.org/jira/browse/HADOOP-8545 | Hadoop 
FS Plugin for SWIFT]]
  
- * Create a gap analysis that examines the FileSystem 2.0 Class, the expected 
behavior of the Operations and the respective Test Coverage available.
+ * Document the FileSystem 2.0 Specification (as a JavaDoc) as a JIRA Ticket
+ - This includes resolving and documenting the expected behavior of the 
FileSystem 2.0 Operations and all the FS operations - [[ 
https://issues.apache.org/jira/browse/HADOOP-9371 | Steve Loughran has started 
this already]]
+ 
+ * Create a gap analysis contrasting the FileSystem 2.0 Specification and the 
audits of existing FileSystem 2.0 Test Coverage.
  
  * Create tests to fill in the gaps

- - Create a workstream to identify if Object/Blob stores have unique 
properties that make them a special case for Test Coverage as a Hadoop FS. 
Create a strategy for handling Object/Block Stores.
+ - Also, create a test strategy for handling Object/Block Stores as Hadoop 
FileSystems
  
- * Validation that a given Hadoop FileSystem implementation is compatible 
would involve:
+ Once the comprehensive test library is complete, it can then be used by the 
provider of a 3rd Party FileSystem to verify compatibility with Hadoop by:
+  
+ - Passing Functional Validation: Successfully passing the test library 
that will be created (described above) 
  
-  - Functional Validation: Successfully passing the test library that will 
be created (described above) 
- 
-  - Ecosystem Validation: Successfully passing the Hadoop Integration 
Tests from Apache BigTop
+ - Passing Ecosystem Validation: Successfully passing the Hadoop 
Integration Tests from Apache BigTop
  
  
  
  Next Meeting  
  
- '''June 25th''' a Red Hat in Mountain View. The day before Hadoop Summmit. 
More details to follow.
+ '''June 25th 2013''' at Red Hat in Mountain View. The day before Hadoop 
Summit. More details to follow.
  
  
  ''Work thus far'' 
  
  
- '''June 10th''' 9AM PST via Google Hangout
+ '''June 10th 2013''' 9AM PST via Google Hangout
  
  Attendees: Tim St. Clair, Matt Farrellee, Steve Watt, Jay Vyas, Steve 
Loughran, Sanjay Radia, Andrew Purtell, Joe Buck, Roman Shaposhnik, Nathan (?)
  
@@ -63, +66 @@

  The workstream definition at the top of this page has been updated to reflect 
the new additions to the initiative.
  
  
- '''June 4th'''
+ '''June 4th 2013'''
  
  Created a [[https://github.com/wattsteve/HCFS/blob/master/jdiff/Report.txt | 
diff report]] contrasting Hadoop FileSystem 1.0 and 2.0
  
  Next step is to 

svn commit: r1491668 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java

2013-06-10 Thread atm
Author: atm
Date: Tue Jun 11 01:19:20 2013
New Revision: 1491668

URL: http://svn.apache.org/r1491668
Log:
HADOOP-9604. Javadoc of FSDataOutputStream is slightly inaccurate. Contributed 
by Jingguo Yao.

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1491668r1=1491667r2=1491668view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Tue Jun 
11 01:19:20 2013
@@ -446,6 +446,9 @@ Release 2.1.0-beta - UNRELEASED
 
 HADOOP-9287. Parallel-testing hadoop-common (Andrey Klochkov via jlowe)
 
+HADOOP-9604. Javadoc of FSDataOutputStream is slightly inaccurate. (Jingguo
+Yao via atm)
+
   OPTIMIZATIONS
 
 HADOOP-9150. Avoid unnecessary DNS resolution attempts for logical URIs

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java?rev=1491668r1=1491667r2=1491668view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
 Tue Jun 11 01:19:20 2013
@@ -26,9 +26,8 @@ import java.io.OutputStream;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
-/** Utility that wraps a {@link OutputStream} in a {@link DataOutputStream},
- * buffers output through a {@link BufferedOutputStream} and creates a checksum
- * file. */
+/** Utility that wraps a {@link OutputStream} in a {@link DataOutputStream}.
+ */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class FSDataOutputStream extends DataOutputStream implements Syncable {




svn commit: r1491669 - in /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java

2013-06-10 Thread atm
Author: atm
Date: Tue Jun 11 01:20:44 2013
New Revision: 1491669

URL: http://svn.apache.org/r1491669
Log:
HADOOP-9604. Javadoc of FSDataOutputStream is slightly inaccurate. Contributed 
by Jingguo Yao.

Modified:

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1491669r1=1491668r2=1491669view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
Tue Jun 11 01:20:44 2013
@@ -121,6 +121,9 @@ Release 2.1.0-beta - UNRELEASED
 
 HADOOP-9287. Parallel-testing hadoop-common (Andrey Klochkov via jlowe)
 
+HADOOP-9604. Javadoc of FSDataOutputStream is slightly inaccurate. (Jingguo
+Yao via atm)
+
   OPTIMIZATIONS
 
 HADOOP-9150. Avoid unnecessary DNS resolution attempts for logical URIs

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java?rev=1491669r1=1491668r2=1491669view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
 (original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
 Tue Jun 11 01:20:44 2013
@@ -22,9 +22,8 @@ import java.io.*;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
-/** Utility that wraps a {@link OutputStream} in a {@link DataOutputStream},
- * buffers output through a {@link BufferedOutputStream} and creates a checksum
- * file. */
+/** Utility that wraps a {@link OutputStream} in a {@link DataOutputStream}.
+ */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class FSDataOutputStream extends DataOutputStream implements Syncable {




svn commit: r1491683 - in /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/ipc/Client.java src/main/java/org/apache/hadoop/ipc/Server.j

2013-06-10 Thread llu
Author: llu
Date: Tue Jun 11 03:31:10 2013
New Revision: 1491683

URL: http://svn.apache.org/r1491683
Log:
HADOOP-9630. [RPC v9] Remove IpcSerializationType. (Junping Du via llu)

Modified:

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1491683r1=1491682r2=1491683view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
Tue Jun 11 03:31:10 2013
@@ -30,17 +30,19 @@ Release 2.1.0-beta - UNRELEASED
 
 HADOOP-8886. Remove KFS support. (eli)
 
-HADOOP-9163 The rpc msg in ProtobufRpcEngine.proto should be moved out to
+HADOOP-9163. [RPC v9] The rpc msg in ProtobufRpcEngine.proto should be 
moved out to
 avoid an extra copy (Sanjay Radia)
 
-HADOOP-9151 Include RPC error info in RpcResponseHeader instead of sending
+HADOOP-9151. [RPC v9] Include RPC error info in RpcResponseHeader instead 
of sending
 it separately (sanjay Radia)
 
-HADOOP-9380 Add totalLength to rpc response  (sanjay Radia)
+HADOOP-9380. [RPC v9] Add totalLength to rpc response  (sanjay Radia)
 
-HADOOP-9425 Add error codes to rpc-response (sanjay Radia)
+HADOOP-9425. [RPC v9] Add error codes to rpc-response (sanjay Radia)
 
-HADOOP-9194. RPC support for QoS. (Junping Du via llu)
+HADOOP-9194. [RPC v9] RPC support for QoS. (Junping Du via llu)
+
+HADOOP-9630. [RPC v9] Remove IpcSerializationType. (Junping Du via llu)
 
   NEW FEATURES
 

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java?rev=1491683r1=1491682r2=1491683view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 (original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 Tue Jun 11 03:31:10 2013
@@ -748,8 +748,6 @@ public class Client {
  * +--+
  * |  Authmethod (1 byte) |  
  * +--+
- * |  IpcSerializationType (1 byte)   |  
- * +--+
  */
 private void writeConnectionHeader(OutputStream outStream)
 throws IOException {
@@ -759,7 +757,6 @@ public class Client {
   out.write(Server.CURRENT_VERSION);
   out.write(serviceClass);
   authMethod.write(out);
-  Server.IpcSerializationType.PROTOBUF.write(out);
   out.flush();
 }
 

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java?rev=1491683r1=1491682r2=1491683view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 (original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 Tue Jun 11 03:31:10 2013
@@ -162,22 +162,6 @@ public abstract class Server {
   public static final ByteBuffer HEADER = ByteBuffer.wrap(hrpc.getBytes());
   
   /**
-   * Serialization type for ConnectionContext and RpcRequestHeader
-   */
-  public enum IpcSerializationType {
-// Add new serialization type to the end without affecting the enum order
-PROTOBUF;
-
-void write(DataOutput out) throws IOException {
-  out.writeByte(this.ordinal());
-}
-
-static IpcSerializationType fromByte(byte b) {
-  return IpcSerializationType.values()[b];
-}
-  }
-  
-  /**
* If the user accidentally sends an HTTP GET to an IPC port, we detect this
* and send back a nicer response.
*/
@@ -1319,7 +1303,7 @@ public abstract class Server {
 if (!connectionHeaderRead) {
   //Every connection is expected to send the header.
   if (connectionHeaderBuf == null) {
-connectionHeaderBuf = 

svn commit: r1491682 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/ipc/Client.java src/main/java/org/apache/hadoop/ipc/Server.java

2013-06-10 Thread llu
Author: llu
Date: Tue Jun 11 03:30:41 2013
New Revision: 1491682

URL: http://svn.apache.org/r1491682
Log:
HADOOP-9630. [RPC v9] Remove IpcSerializationType. (Junping Du via llu)

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1491682r1=1491681r2=1491682view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Tue Jun 
11 03:30:41 2013
@@ -358,17 +358,19 @@ Release 2.1.0-beta - UNRELEASED
 
 HADOOP-8886. Remove KFS support. (eli)
 
-HADOOP-9163 The rpc msg in ProtobufRpcEngine.proto should be moved out to
+HADOOP-9163. [RPC v9] The rpc msg in ProtobufRpcEngine.proto should be 
moved out to
 avoid an extra copy (Sanjay Radia)
 
-HADOOP-9151 Include RPC error info in RpcResponseHeader instead of sending
+HADOOP-9151. [RPC v9] Include RPC error info in RpcResponseHeader instead 
of sending
 it separately (sanjay Radia)
 
-HADOOP-9380 Add totalLength to rpc response  (sanjay Radia)
+HADOOP-9380. [RPC v9] Add totalLength to rpc response  (sanjay Radia)
 
-HADOOP-9425 Add error codes to rpc-response (sanjay Radia)
+HADOOP-9425. [RPC v9] Add error codes to rpc-response (sanjay Radia)
 
-HADOOP-9194. RPC support for QoS. (Junping Du via llu)
+HADOOP-9194. [RPC v9] RPC support for QoS. (Junping Du via llu)
+
+HADOOP-9630. [RPC v9] Remove IpcSerializationType. (Junping Du via llu)
 
   NEW FEATURES
 

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java?rev=1491682r1=1491681r2=1491682view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 Tue Jun 11 03:30:41 2013
@@ -750,8 +750,6 @@ public class Client {
  * +--+
  * |  Authmethod (1 byte) |  
  * +--+
- * |  IpcSerializationType (1 byte)   |  
- * +--+
  */
 private void writeConnectionHeader(OutputStream outStream)
 throws IOException {
@@ -761,7 +759,6 @@ public class Client {
   out.write(Server.CURRENT_VERSION);
   out.write(serviceClass);
   authMethod.write(out);
-  Server.IpcSerializationType.PROTOBUF.write(out);
   out.flush();
 }
 

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java?rev=1491682r1=1491681r2=1491682view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 Tue Jun 11 03:30:41 2013
@@ -165,22 +165,6 @@ public abstract class Server {
   public static final ByteBuffer HEADER = ByteBuffer.wrap(hrpc.getBytes());
   
   /**
-   * Serialization type for ConnectionContext and RpcRequestHeader
-   */
-  public enum IpcSerializationType {
-// Add new serialization type to the end without affecting the enum order
-PROTOBUF;
-
-void write(DataOutput out) throws IOException {
-  out.writeByte(this.ordinal());
-}
-
-static IpcSerializationType fromByte(byte b) {
-  return IpcSerializationType.values()[b];
-}
-  }
-  
-  /**
* If the user accidentally sends an HTTP GET to an IPC port, we detect this
* and send back a nicer response.
*/
@@ -1322,7 +1306,7 @@ public abstract class Server {
 if (!connectionHeaderRead) {
   //Every connection is expected to send the header.
   if (connectionHeaderBuf == null) {
-connectionHeaderBuf = ByteBuffer.allocate(4);
+connectionHeaderBuf = ByteBuffer.allocate(3);
   }
   count = channelRead(channel, connectionHeaderBuf);
   if (count  0 ||