[40/52] [abbrv] hadoop git commit: YARN-3236. Cleanup RMAuthenticationFilter#AUTH_HANDLER_PROPERTY. Contributed by zhihai xu

2015-02-23 Thread zhz
YARN-3236. Cleanup RMAuthenticationFilter#AUTH_HANDLER_PROPERTY.
Contributed by zhihai xu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3d29024
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3d29024
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3d29024

Branch: refs/heads/HDFS-7285
Commit: e3d290244c8a39edc37146d992cf34e6963b6851
Parents: 92d67ac
Author: Xuan xg...@apache.org
Authored: Sat Feb 21 16:18:40 2015 -0800
Committer: Xuan xg...@apache.org
Committed: Sat Feb 21 16:18:40 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop/yarn/server/security/http/RMAuthenticationFilter.java  | 2 --
 2 files changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3d29024/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1650a20..1982688 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -315,6 +315,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3237. AppLogAggregatorImpl fails to log error cause.
 (Rushabh S Shah via xgong)
 
+YARN-3236. Cleanup RMAuthenticationFilter#AUTH_HANDLER_PROPERTY.
+(zhihai xu via xgong)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3d29024/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilter.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilter.java
index 3eeb620..c3497a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilter.java
@@ -40,8 +40,6 @@ public class RMAuthenticationFilter extends
 DelegationTokenAuthenticationFilter {
 
   static private AbstractDelegationTokenSecretManager<?> manager;
-  public static final String AUTH_HANDLER_PROPERTY =
-      "yarn.resourcemanager.authentication-handler";
   private static final String OLD_HEADER = "Hadoop-YARN-Auth-Delegation-Token";
 
   public RMAuthenticationFilter() {



[38/52] [abbrv] hadoop git commit: HDFS-7806. Refactor: move StorageType from hadoop-hdfs to hadoop-common. (Contributed by Xiaoyu Yao)

2015-02-23 Thread zhz
HDFS-7806. Refactor: move StorageType from hadoop-hdfs to hadoop-common. 
(Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b465b4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b465b4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b465b4b

Branch: refs/heads/HDFS-7285
Commit: 8b465b4b8caed31ca9daeaae108f9a868a30a455
Parents: 709ff99
Author: Arpit Agarwal a...@apache.org
Authored: Sat Feb 21 15:38:35 2015 -0800
Committer: Arpit Agarwal a...@apache.org
Committed: Sat Feb 21 15:38:35 2015 -0800

--
 .../java/org/apache/hadoop/fs/StorageType.java  | 94 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |  1 +
 .../apache/hadoop/hdfs/BlockReaderLocal.java|  1 +
 .../hadoop/hdfs/BlockReaderLocalLegacy.java |  1 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  1 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  1 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  3 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  1 +
 .../org/apache/hadoop/hdfs/StorageType.java | 94 
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  2 +-
 .../hdfs/protocol/BlockStoragePolicy.java   |  2 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|  2 +-
 .../hdfs/protocol/DatanodeInfoWithStorage.java  |  2 +-
 .../hadoop/hdfs/protocol/LocatedBlock.java  |  2 +-
 .../QuotaByStorageTypeExceededException.java|  2 +-
 .../datatransfer/DataTransferProtocol.java  |  2 +-
 .../hdfs/protocol/datatransfer/Sender.java  |  2 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  2 +-
 .../hadoop/hdfs/server/balancer/Balancer.java   |  2 +-
 .../hdfs/server/balancer/BalancingPolicy.java   |  2 +-
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  2 +-
 .../server/blockmanagement/BlockManager.java|  2 +-
 .../blockmanagement/BlockPlacementPolicy.java   |  2 +-
 .../BlockPlacementPolicyDefault.java|  2 +-
 .../BlockPlacementPolicyWithNodeGroup.java  |  3 +-
 .../BlockStoragePolicySuite.java|  2 +-
 .../blockmanagement/DatanodeDescriptor.java |  2 +-
 .../blockmanagement/DatanodeStorageInfo.java|  2 +-
 .../hdfs/server/datanode/BPOfferService.java|  2 +-
 .../hdfs/server/datanode/BlockReceiver.java |  2 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  2 +-
 .../hdfs/server/datanode/DataXceiver.java   |  2 +-
 .../server/datanode/ReportBadBlockAction.java   |  2 +-
 .../hdfs/server/datanode/StorageLocation.java   |  2 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |  2 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  2 +-
 .../RoundRobinVolumeChoosingPolicy.java |  1 -
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  2 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  2 +-
 .../datanode/fsdataset/impl/FsVolumeList.java   |  2 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  6 +-
 .../namenode/DirectoryWithQuotaFeature.java |  2 +-
 .../hdfs/server/namenode/FSDirAttrOp.java   |  2 +-
 .../hdfs/server/namenode/FSDirConcatOp.java |  2 +-
 .../hdfs/server/namenode/FSDirectory.java   |  2 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  2 +-
 .../hdfs/server/namenode/FSEditLogOp.java   |  2 +-
 .../hadoop/hdfs/server/namenode/FSImage.java|  2 +-
 .../server/namenode/FSImageFormatPBINode.java   |  2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../hdfs/server/namenode/INodeDirectory.java|  4 +-
 .../namenode/INodeDirectoryAttributes.java  |  2 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  2 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  2 +-
 .../namenode/QuotaByStorageTypeEntry.java   |  2 +-
 .../hdfs/server/namenode/QuotaCounts.java   |  2 +-
 .../snapshot/FSImageFormatPBSnapshot.java   |  2 +-
 .../snapshot/FileWithSnapshotFeature.java   |  2 +-
 .../hdfs/server/protocol/BlockCommand.java  |  2 +-
 .../server/protocol/BlocksWithLocations.java|  2 +-
 .../hdfs/server/protocol/DatanodeStorage.java   |  2 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  2 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 11 ++-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  1 +
 .../hdfs/MiniDFSClusterWithNodeGroup.java   |  4 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java |  1 +
 .../hadoop/hdfs/TestDataTransferProtocol.java   |  1 +
 .../hdfs/TestWriteBlockGetsBlockLengthHint.java |  1 +
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|  2 +-
 .../hdfs/server/balancer/TestBalancer.java  |  6 +-
 .../blockmanagement/TestBlockManager.java   |  2 +-
 .../blockmanagement/TestDatanodeManager.java|  2 +-
 .../blockmanagement/TestReplicationPolicy.java  |  2 +-
 

[05/52] [abbrv] hadoop git commit: HADOOP-11599. Client#getTimeout should use IPC_CLIENT_PING_DEFAULT when IPC_CLIENT_PING_KEY is not configured. Contributed by zhihai xu.

2015-02-23 Thread zhz
HADOOP-11599. Client#getTimeout should use IPC_CLIENT_PING_DEFAULT when 
IPC_CLIENT_PING_KEY is not configured. Contributed by zhihai xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f56a4cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f56a4cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f56a4cb

Branch: refs/heads/HDFS-7285
Commit: 3f56a4cb0c57583e285e85a4d0c1584c4de9f1f1
Parents: b6fc1f3
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Feb 18 17:32:50 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed Feb 18 17:35:11 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt| 3 +++
 .../src/main/java/org/apache/hadoop/ipc/Client.java| 3 ++-
 .../src/test/java/org/apache/hadoop/ipc/TestIPC.java   | 6 ++
 3 files changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f56a4cb/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index b3b2c95..f248555 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -959,6 +959,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11295. RPC Server Reader thread can't shutdown if RPCCallQueue is
 full. (Ming Ma via kihwal)
 
+HADOOP-11599. Client#getTimeout should use IPC_CLIENT_PING_DEFAULT when 
+IPC_CLIENT_PING_KEY is not configured. (zhihai xu via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f56a4cb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index bdcb96c..3f93c42 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -236,7 +236,8 @@ public class Client {
    * @return the timeout period in milliseconds. -1 if no timeout value is set
    */
   final public static int getTimeout(Configuration conf) {
-    if (!conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true)) {
+    if (!conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY,
+        CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT)) {
       return getPingInterval(conf);
     }
     return -1;
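
For reference, a minimal usage sketch of the changed method. It assumes only
the stock constants in CommonConfigurationKeys: with ipc.client.ping left at
its default (true) no explicit timeout is derived, and with pings disabled the
ping interval doubles as the socket timeout.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.ipc.Client;

    public class GetTimeoutSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // empty configuration: ping enabled by default, so no timeout (-1)
        System.out.println(Client.getTimeout(conf));  // -1

        conf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);
        // pings disabled: the ping interval (60000 ms by default) is returned
        System.out.println(Client.getTimeout(conf));  // 60000
      }
    }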

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f56a4cb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 04a7412..eb19f48 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -1235,6 +1235,12 @@ public class TestIPC {
 }
   }
 
+  @Test
+  public void testClientGetTimeout() throws IOException {
+    Configuration config = new Configuration();
+    assertEquals(Client.getTimeout(config), -1);
+  }
+
   private void assertRetriesOnSocketTimeouts(Configuration conf,
   int maxTimeoutRetries) throws IOException {
 SocketFactory mockFactory = Mockito.mock(SocketFactory.class);



[24/52] [abbrv] hadoop git commit: HDFS-7788. Post-2.6 namenode may not start up with an image containing inodes created with an old release. Contributed by Rushabh Shah.

2015-02-23 Thread zhz
HDFS-7788. Post-2.6 namenode may not start up with an image containing inodes 
created with an old release. Contributed by Rushabh Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ae5255a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ae5255a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ae5255a

Branch: refs/heads/HDFS-7285
Commit: 7ae5255a1613ccfb43646f33eabacf1062c86e93
Parents: b9a1790
Author: Kihwal Lee kih...@apache.org
Authored: Fri Feb 20 09:06:07 2015 -0600
Committer: Kihwal Lee kih...@apache.org
Committed: Fri Feb 20 09:09:02 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 ++
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   3 ++
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |   4 ++
 .../hdfs/server/namenode/TestFSImage.java   |  48 +++
 .../resources/image-with-zero-block-size.tar.gz | Bin 0 -> 1452 bytes
 5 files changed, 58 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ae5255a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5f3cc02..71ce48f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -992,6 +992,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7808. Remove obsolete -ns options in in DFSHAAdmin.java.
 (Arshad Mohammad via wheat9)
 
+HDFS-7788. Post-2.6 namenode may not start up with an image containing
+inodes created with an old release. (Rushabh Shah via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ae5255a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 24e25ec..3743bf0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -107,6 +107,9 @@ public class INodeFile extends INodeWithAdditionalFields
   static long toLong(long preferredBlockSize, short replication,
       byte storagePolicyID) {
     long h = 0;
+    if (preferredBlockSize == 0) {
+      preferredBlockSize = PREFERRED_BLOCK_SIZE.BITS.getMin();
+    }
     h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
     h = REPLICATION.BITS.combine(replication, h);
     h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ae5255a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 863d9f7..9399d84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -64,4 +64,8 @@ public class LongBitFormat implements Serializable {
     }
     return (record & ~MASK) | (value << OFFSET);
   }
+
+  public long getMin() {
+    return MIN;
+  }
 }
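
The combine/getMin pair implements fixed-width bit packing of several fields
into one long. A self-contained sketch of the same idea follows; the field
layout (offsets and widths) is hypothetical, not HDFS's actual header format.
It also shows why toLong() above clamps a zero preferredBlockSize: a field
whose format enforces a minimum cannot encode values below getMin().

    public class BitPackSketch {
      // pack value into bits [offset, offset+length) of record,
      // mirroring the masked combine() shown above
      static long combine(long value, long record, int offset, int length) {
        long mask = ((-1L) >>> (64 - length)) << offset;
        return (record & ~mask) | ((value << offset) & mask);
      }

      public static void main(String[] args) {
        long h = 0;
        h = combine(0, h, 0, 4);                    // storage policy id
        h = combine(3, h, 4, 12);                   // replication factor
        h = combine(128L * 1024 * 1024, h, 16, 48); // preferred block size
        System.out.printf("packed header: 0x%x%n", h);
      }
    }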

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ae5255a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index c68ae04..f7dad18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -28,10 +28,13 @@ import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import 

[51/52] [abbrv] hadoop git commit: HDFS-7716. Erasure Coding: extend BlockInfo to handle EC info. Contributed by Jing Zhao.

2015-02-23 Thread zhz
HDFS-7716. Erasure Coding: extend BlockInfo to handle EC info. Contributed by 
Jing Zhao.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d1ec74d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d1ec74d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d1ec74d

Branch: refs/heads/HDFS-7285
Commit: 9d1ec74d005f20d1e371be3026b21595d6106b60
Parents: 0d3b462
Author: Jing Zhao ji...@apache.org
Authored: Tue Feb 10 17:54:10 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 23 11:21:13 2015 -0800

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |   1 +
 .../server/blockmanagement/BlockCollection.java |  13 +-
 .../server/blockmanagement/BlockIdManager.java  |   7 +-
 .../hdfs/server/blockmanagement/BlockInfo.java  | 339 +
 .../blockmanagement/BlockInfoContiguous.java| 363 +++
 .../BlockInfoContiguousUnderConstruction.java   | 140 +--
 .../blockmanagement/BlockInfoStriped.java   | 179 +
 .../server/blockmanagement/BlockManager.java| 188 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  |  46 +--
 .../CacheReplicationMonitor.java|  10 +-
 .../blockmanagement/DatanodeDescriptor.java |  22 +-
 .../blockmanagement/DatanodeStorageInfo.java|  38 +-
 .../ReplicaUnderConstruction.java   | 119 ++
 .../hdfs/server/namenode/FSDirectory.java   |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  24 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |   3 +-
 .../snapshot/FSImageFormatPBSnapshot.java   |   4 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   4 +-
 .../server/blockmanagement/TestBlockInfo.java   |   6 +-
 .../blockmanagement/TestBlockInfoStriped.java   | 219 +++
 .../blockmanagement/TestBlockManager.java   |   4 +-
 .../blockmanagement/TestReplicationPolicy.java  |   2 +-
 22 files changed, 1127 insertions(+), 608 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d1ec74d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index e3e3f37..acf4853 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -184,5 +184,6 @@ public class HdfsConstants {
 
   public static final byte NUM_DATA_BLOCKS = 3;
   public static final byte NUM_PARITY_BLOCKS = 2;
+  public static final long BLOCK_GROUP_INDEX_MASK = 15;
   public static final byte MAX_BLOCKS_IN_GROUP = 16;
 }
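
A short sketch of how these constants fit together, under the assumed
semantics that for striped blocks the low bits of the block ID carry the
position of an internal block within its group (the helpers below are
hypothetical, not part of the patch):

    // mask 15 keeps the low 4 bits, selecting one of up to
    // MAX_BLOCKS_IN_GROUP (16) internal blocks in the group
    static int blockIndexInGroup(long blockId) {
      return (int) (blockId & HdfsConstants.BLOCK_GROUP_INDEX_MASK);
    }

    // clearing those bits recovers the ID of the enclosing block group
    static long blockGroupId(long blockId) {
      return blockId & ~HdfsConstants.BLOCK_GROUP_INDEX_MASK;
    }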

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d1ec74d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 1547611..974cac3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -39,12 +39,12 @@ public interface BlockCollection {
   public ContentSummary computeContentSummary();
 
   /**
-   * @return the number of blocks
+   * @return the number of blocks or block groups
*/ 
   public int numBlocks();
 
   /**
-   * Get the blocks.
+   * Get the blocks or block groups.
*/
   public BlockInfoContiguous[] getBlocks();
 
@@ -55,8 +55,8 @@ public interface BlockCollection {
   public long getPreferredBlockSize();
 
   /**
-   * Get block replication for the collection 
-   * @return block replication value
+   * Get block replication for the collection.
+   * @return block replication value. Return 0 if the file is erasure coded.
*/
   public short getBlockReplication();
 
@@ -71,7 +71,7 @@ public interface BlockCollection {
   public String getName();
 
   /**
-   * Set the block at the given index.
+   * Set the block/block-group at the given index.
*/
   public void setBlock(int index, BlockInfoContiguous blk);
 
@@ -79,7 +79,8 @@ public interface 

[11/52] [abbrv] hadoop git commit: HDFS-7808. Remove obsolete -ns options in in DFSHAAdmin.java. Contributed by Arshad Mohammad.

2015-02-23 Thread zhz
HDFS-7808. Remove obsolete -ns options in in DFSHAAdmin.java. Contributed by 
Arshad Mohammad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a3e2920
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a3e2920
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a3e2920

Branch: refs/heads/HDFS-7285
Commit: 9a3e29208740da94d0cca5bb1c8163bea60d1387
Parents: 2aa9979
Author: Haohui Mai whe...@apache.org
Authored: Wed Feb 18 15:14:39 2015 -0800
Committer: Haohui Mai whe...@apache.org
Committed: Wed Feb 18 15:15:08 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../apache/hadoop/hdfs/tools/DFSHAAdmin.java| 20 
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java   | 20 
 .../hdfs/tools/TestDFSHAAdminMiniCluster.java   |  3 ---
 4 files changed, 3 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a3e2920/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 70eae1c..3735e90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -982,6 +982,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-6662. WebHDFS cannot open a file if its path contains %.
 (Gerson Carlos via wheat9)
 
+HDFS-7808. Remove obsolete -ns options in in DFSHAAdmin.java.
+(Arshad Mohammad via wheat9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a3e2920/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index 1ec6d35..6b6fb30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.PrintStream;
-import java.util.Arrays;
 import java.util.Collection;
 
 import org.apache.commons.logging.Log;
@@ -98,25 +97,6 @@ public class DFSHAAdmin extends HAAdmin {
   printUsage(errOut);
   return -1;
 }
-
-    int i = 0;
-    String cmd = argv[i++];
-
-    if ("-ns".equals(cmd)) {
-      if (i == argv.length) {
-        errOut.println("Missing nameservice ID");
-        printUsage(errOut);
-        return -1;
-      }
-      nameserviceId = argv[i++];
-      if (i >= argv.length) {
-        errOut.println("Missing command");
-        printUsage(errOut);
-        return -1;
-      }
-      argv = Arrays.copyOfRange(argv, i, argv.length);
-    }
-
     return super.runCmd(argv);
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a3e2920/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index 33da4d4..8ecc71a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -147,17 +147,6 @@ public class TestDFSHAAdmin {
   }
   
   @Test
-  public void testNameserviceOption() throws Exception {
-    assertEquals(-1, runTool("-ns"));
-    assertOutputContains("Missing nameservice ID");
-    assertEquals(-1, runTool("-ns", "ns1"));
-    assertOutputContains("Missing command");
-    // ns1 isn't defined but we check this lazily and help doesn't use the ns
-    assertEquals(0, runTool("-ns", "ns1", "-help", "transitionToActive"));
-    assertOutputContains("Transitions the service into Active");
-  }
-
-  @Test
   public void testNamenodeResolution() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     assertEquals(0, runTool("-getServiceState", "nn1"));
@@ -279,15 +268,6 @@ public class TestDFSHAAdmin {
   }
 
   @Test
-  public void testFailoverWithFencerAndNameservice() throws Exception {
-    Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
-    HdfsConfiguration conf = getHAConf();
-

[15/52] [abbrv] hadoop git commit: YARN-1615. Fix typos in delay scheduler's description. Contributed by Akira Ajisaka.

2015-02-23 Thread zhz
YARN-1615. Fix typos in delay scheduler's description. Contributed by Akira 
Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8a14efd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8a14efd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8a14efd

Branch: refs/heads/HDFS-7285
Commit: b8a14efdf535d42bcafa58d380bd2c7f4d36f8cb
Parents: 1c03376
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Thu Feb 19 10:24:07 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Thu Feb 19 10:24:07 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt| 3 +++
 .../server/resourcemanager/scheduler/fair/FSAppAttempt.java| 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8a14efd/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 91ce11f..6b2b878 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -617,6 +617,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3132. RMNodeLabelsManager should remove node from node-to-label mapping
 when node becomes deactivated. (Wangda Tan via jianhe)
 
+YARN-1615. Fix typos in description about delay scheduling. (Akira Ajisaka via
+ozawa)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8a14efd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 7d26396..67103d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -80,10 +80,10 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
 
   /**
* Delay scheduling: We often want to prioritize scheduling of node-local
-   * containers over rack-local or off-switch containers. To acheive this
-   * we first only allow node-local assigments for a given prioirty level,
+   * containers over rack-local or off-switch containers. To achieve this
+   * we first only allow node-local assignments for a given priority level,
* then relax the locality threshold once we've had a long enough period
-   * without succesfully scheduling. We measure both the number of missed
+   * without successfully scheduling. We measure both the number of missed
* scheduling opportunities since the last container was scheduled
* at the current allowed level and the time since the last container
* was scheduled. Currently we use only the former.



[46/52] [abbrv] hadoop git commit: HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding (Kai Zheng via umamahesh)

2015-02-23 Thread zhz
HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding (Kai 
Zheng via umamahesh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65dca224
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65dca224
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65dca224

Branch: refs/heads/HDFS-7285
Commit: 65dca22456925a0516f9e515ee5fe901f918fae6
Parents: 08a9ac3
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Thu Jan 29 14:15:13 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 23 11:20:19 2015 -0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  4 +
 .../apache/hadoop/io/erasurecode/ECChunk.java   | 82 +
 .../rawcoder/AbstractRawErasureCoder.java   | 63 +
 .../rawcoder/AbstractRawErasureDecoder.java | 93 
 .../rawcoder/AbstractRawErasureEncoder.java | 93 
 .../erasurecode/rawcoder/RawErasureCoder.java   | 78 
 .../erasurecode/rawcoder/RawErasureDecoder.java | 55 
 .../erasurecode/rawcoder/RawErasureEncoder.java | 54 
 8 files changed, 522 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65dca224/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
new file mode 100644
index 000..8ce5a89
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -0,0 +1,4 @@
+  BREAKDOWN OF HADOOP-11264 SUBTASKS AND RELATED JIRAS (Common part of HDFS-7285)
+
+HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding
+(Kai Zheng via umamahesh)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65dca224/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
new file mode 100644
index 0000000..f84eb11
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A wrapper for ByteBuffer or bytes array for an erasure code chunk.
+ */
+public class ECChunk {
+
+  private ByteBuffer chunkBuffer;
+
+  /**
+   * Wrapping a ByteBuffer
+   * @param buffer
+   */
+  public ECChunk(ByteBuffer buffer) {
+    this.chunkBuffer = buffer;
+  }
+
+  /**
+   * Wrapping a bytes array
+   * @param buffer
+   */
+  public ECChunk(byte[] buffer) {
+    this.chunkBuffer = ByteBuffer.wrap(buffer);
+  }
+
+  /**
+   * Convert to ByteBuffer
+   * @return ByteBuffer
+   */
+  public ByteBuffer getBuffer() {
+    return chunkBuffer;
+  }
+
+  /**
+   * Convert an array of this chunks to an array of ByteBuffers
+   * @param chunks
+   * @return an array of ByteBuffers
+   */
+  public static ByteBuffer[] toBuffers(ECChunk[] chunks) {
+    ByteBuffer[] buffers = new ByteBuffer[chunks.length];
+
+    for (int i = 0; i < chunks.length; i++) {
+      buffers[i] = chunks[i].getBuffer();
+    }
+
+    return buffers;
+  }
+
+  /**
+   * Convert an array of this chunks to an array of byte array
+   * @param chunks
+   * @return an array of byte array
+   */
+  public static byte[][] toArray(ECChunk[] chunks) {
+    byte[][] bytesArr = new byte[chunks.length][];
+
+    for (int i = 0; i < chunks.length; i++) {
+      bytesArr[i] = chunks[i].getBuffer().array();
+    }
+
+    return bytesArr;
+  }
+}
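
A short usage sketch of the class above: wrap one buffer per coding unit, then
unwrap for a raw coder. The chunk size of 4096 is arbitrary; only ECChunk and
java.nio.ByteBuffer from the new file are assumed.

    ECChunk[] inputs = new ECChunk[3];
    for (int i = 0; i < inputs.length; i++) {
      inputs[i] = new ECChunk(new byte[4096]);      // one chunk per data unit
    }
    ByteBuffer[] buffers = ECChunk.toBuffers(inputs); // ByteBuffer view
    byte[][] arrays = ECChunk.toArray(inputs);        // backing-array view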


[27/52] [abbrv] hadoop git commit: YARN-3230. Clarify application states on the web UI. (Jian He via wangda)

2015-02-23 Thread zhz
YARN-3230. Clarify application states on the web UI. (Jian He via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce5bf927
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce5bf927
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce5bf927

Branch: refs/heads/HDFS-7285
Commit: ce5bf927c3d9f212798de1bf8706e5e9def235a1
Parents: c33ae27
Author: Wangda Tan wan...@apache.org
Authored: Fri Feb 20 10:39:28 2015 -0800
Committer: Wangda Tan wan...@apache.org
Committed: Fri Feb 20 10:39:28 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  2 ++
 .../server/resourcemanager/webapp/AppBlock.java | 33 ++--
 .../resourcemanager/webapp/AppsBlock.java   |  6 ++--
 .../resourcemanager/webapp/dao/AppInfo.java |  8 ++---
 4 files changed, 41 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce5bf927/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e71da2d..c028043 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -310,6 +310,8 @@ Release 2.7.0 - UNRELEASED
 YARN-2799. Cleanup TestLogAggregationService based on the change in YARN-90.
 (Zhihai Xu via junping_du)
 
+YARN-3230. Clarify application states on the web UI. (Jian He via wangda)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce5bf927/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
index 1856d75..c2b376e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
@@ -32,8 +32,10 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -131,8 +133,9 @@ public class AppBlock extends HtmlBlock {
         ._("Name:", app.getName())
         ._("Application Type:", app.getApplicationType())
         ._("Application Tags:", app.getApplicationTags())
-        ._("State:", app.getState())
-        ._("FinalStatus:", app.getFinalStatus())
+        ._("YarnApplicationState:", clarifyAppState(app.getState()))
+        ._("FinalStatus reported by AM:",
+          clairfyAppFinalStatus(app.getFinalStatus()))
         ._("Started:", Times.format(app.getStartTime()))
         ._("Elapsed:",
             StringUtils.formatTime(Times.elapsed(app.getStartTime(),
@@ -198,4 +201,30 @@ public class AppBlock extends HtmlBlock {
 table._();
 div._();
   }
+
+  private String clarifyAppState(YarnApplicationState state) {
+    String ret = state.toString();
+    switch (state) {
+    case NEW:
+      return ret + ": waiting for application to be initialized";
+    case NEW_SAVING:
+      return ret + ": waiting for application to be persisted in state-store.";
+    case SUBMITTED:
+      return ret + ": waiting for application to be accepted by scheduler.";
+    case ACCEPTED:
+      return ret + ": waiting for AM container to be allocated, launched and"
+          + " register with RM.";
+    case RUNNING:
+      return ret + ": AM has registered with RM and started running.";
+    default:
+      return ret;
+    }
+  }
+
+  private String clairfyAppFinalStatus(FinalApplicationStatus status) {
+    if (status == FinalApplicationStatus.UNDEFINED) {
+      return "Application has not completed 

[25/52] [abbrv] hadoop git commit: YARN-3194. RM should handle NMContainerStatuses sent by NM while registering if NM is Reconnected node. Contributed by Rohith

2015-02-23 Thread zhz
YARN-3194. RM should handle NMContainerStatuses sent by NM while registering if 
NM is Reconnected node. Contributed by Rohith


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a64dd3d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a64dd3d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a64dd3d2

Branch: refs/heads/HDFS-7285
Commit: a64dd3d24bfcb9af21eb63869924f6482b147fd3
Parents: 7ae5255
Author: Jason Lowe jl...@apache.org
Authored: Fri Feb 20 15:08:48 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Fri Feb 20 15:10:10 2015 +

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../resourcemanager/ResourceTrackerService.java |   9 +-
 .../resourcemanager/rmnode/RMNodeImpl.java  | 111 ++---
 .../rmnode/RMNodeReconnectEvent.java|   9 +-
 .../resourcemanager/TestApplicationCleanup.java | 121 +++
 .../resourcemanager/TestRMNodeTransitions.java  |   4 +-
 6 files changed, 209 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a64dd3d2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cac6680..8ec2409 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -626,6 +626,9 @@ Release 2.7.0 - UNRELEASED
 YARN-933. Fixed InvalidStateTransitonException at FINAL_SAVING state in
 RMApp. (Rohith Sharmaks via jianhe)
 
+YARN-3194. RM should handle NMContainerStatuses sent by NM while
+registering if NM is Reconnected node (Rohith via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a64dd3d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index 61a0349..0de556b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -312,9 +312,12 @@ public class ResourceTrackerService extends AbstractService implements
     } else {
       LOG.info("Reconnect from the node at: " + host);
       this.nmLivelinessMonitor.unregister(nodeId);
-      this.rmContext.getDispatcher().getEventHandler().handle(
-          new RMNodeReconnectEvent(nodeId, rmNode,
-              request.getRunningApplications()));
+      this.rmContext
+          .getDispatcher()
+          .getEventHandler()
+          .handle(
+              new RMNodeReconnectEvent(nodeId, rmNode, request
+                  .getRunningApplications(), request.getNMContainerStatuses()));
     }
     // On every node manager register we will be clearing NMToken keys if
     // present for any running application.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a64dd3d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 1bc98b2..9701775 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -601,6 +601,8 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
 rmNode.httpAddress = newNode.getHttpAddress();
 rmNode.totalCapability = newNode.getTotalCapability();
   

[36/52] [abbrv] hadoop git commit: HADOOP-11584 s3a file block size set to 0 in getFileStatus. (Brahma Reddy Battula via stevel)

2015-02-23 Thread zhz
HADOOP-11584 s3a file block size set to 0 in getFileStatus. (Brahma Reddy 
Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/709ff99c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/709ff99c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/709ff99c

Branch: refs/heads/HDFS-7285
Commit: 709ff99cff4124823bde631e272af7be9a22f83b
Parents: 737bad0
Author: Steve Loughran ste...@apache.org
Authored: Sat Feb 21 12:02:41 2015 +
Committer: Steve Loughran ste...@apache.org
Committed: Sat Feb 21 12:03:03 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../org/apache/hadoop/fs/s3a/S3AFileStatus.java |  5 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 41 +
 .../apache/hadoop/fs/s3a/TestS3ABlocksize.java  | 93 
 .../src/test/resources/log4j.properties | 18 
 5 files changed, 143 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/709ff99c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 143692e..373a485 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -984,6 +984,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11612. Workaround for Curator's ChildReaper requiring Guava 15+.
 (rkanter)
 
+HADOOP-11584 s3a file block size set to 0 in getFileStatus.
+(Brahma Reddy Battula via stevel)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/709ff99c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
index eb64492..85e0ef7 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
@@ -31,8 +31,9 @@ public class S3AFileStatus extends FileStatus {
   }
 
   // Files
-  public S3AFileStatus(long length, long modification_time, Path path) {
-    super(length, false, 1, 0, modification_time, path);
+  public S3AFileStatus(long length, long modification_time, Path path,
+      long blockSize) {
+    super(length, false, 1, blockSize, modification_time, path);
     isEmptyDirectory = false;
   }
 
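
Why a zero block size mattered downstream: input formats size their splits by
the reported block size. A sketch of the standard FileInputFormat-style
formula, reproduced here for illustration:

    static long computeSplitSize(long blockSize, long minSize, long maxSize) {
      return Math.max(minSize, Math.min(maxSize, blockSize));
    }
    // blockSize == 0 collapses this to minSize (1 byte by default), producing
    // a pathological number of tiny splits; reporting a real value such as
    // the DEFAULT_BLOCKSIZE added below restores sensible split sizes.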

http://git-wip-us.apache.org/repos/asf/hadoop/blob/709ff99c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index eaa5f2d..2373e7e 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -77,6 +77,10 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class S3AFileSystem extends FileSystem {
+  /**
+   * Default blocksize as used in blocksize and FS status queries
+   */
+  public static final int DEFAULT_BLOCKSIZE = 32 * 1024 * 1024;
   private URI uri;
   private Path workingDir;
   private AmazonS3Client s3;
@@ -256,7 +260,7 @@ public class S3AFileSystem extends FileSystem {
 }
 long keepAliveTime = conf.getLong(KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME);
     LinkedBlockingQueue<Runnable> workQueue =
-      new LinkedBlockingQueue<Runnable>(maxThreads *
+      new LinkedBlockingQueue<>(maxThreads *
         conf.getInt(MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS));
 ThreadPoolExecutor tpe = new ThreadPoolExecutor(
 coreThreads,
@@ -434,7 +438,7 @@ public class S3AFileSystem extends FileSystem {
     String srcKey = pathToKey(src);
     String dstKey = pathToKey(dst);
 
-    if (srcKey.length() == 0 || dstKey.length() == 0) {
+    if (srcKey.isEmpty() || dstKey.isEmpty()) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("rename: src or dst are empty");
       }
@@ -526,7 +530,7 @@ public class S3AFileSystem extends FileSystem {
   }
 
     List<DeleteObjectsRequest.KeyVersion> keysToDelete =
-        new ArrayList<DeleteObjectsRequest.KeyVersion>();
+        new ArrayList<>();
     if (dstStatus != null && dstStatus.isEmptyDirectory()) {
       // delete unnecessary fake directory.
       keysToDelete.add(new 

[20/52] [abbrv] hadoop git commit: HADOOP-9087. Queue size metric for metric sinks isn't actually maintained. Contributed by Akira AJISAKA

2015-02-23 Thread zhz
HADOOP-9087. Queue size metric for metric sinks isn't actually maintained. 
Contributed by Akira AJISAKA


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0f29926
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0f29926
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0f29926

Branch: refs/heads/HDFS-7285
Commit: f0f299268625af275522f55d5bfc43118c31bdd8
Parents: 2fd02af
Author: Jason Lowe jl...@apache.org
Authored: Thu Feb 19 17:30:07 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Thu Feb 19 17:30:07 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../metrics2/impl/MetricsSinkAdapter.java   | 15 +-
 .../hadoop-common/src/site/markdown/Metrics.md  |  2 +-
 .../metrics2/impl/TestMetricsSystemImpl.java| 50 
 4 files changed, 67 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0f29926/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index c01e3d6..8d3f9f5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -973,6 +973,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11595. Add default implementation for AbstractFileSystem#truncate.
 (yliu)
 
+HADOOP-9087. Queue size metric for metric sinks isn't actually maintained
+(Akira AJISAKA via jlowe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0f29926/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
index 9add494..ed52317 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
@@ -95,7 +95,10 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
   boolean putMetrics(MetricsBuffer buffer, long logicalTime) {
     if (logicalTime % period == 0) {
       LOG.debug("enqueue, logicalTime=" + logicalTime);
-      if (queue.enqueue(buffer)) return true;
+      if (queue.enqueue(buffer)) {
+        refreshQueueSizeGauge();
+        return true;
+      }
       dropped.incr();
       return false;
     }
@@ -105,7 +108,9 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
   public boolean putMetricsImmediate(MetricsBuffer buffer) {
     WaitableMetricsBuffer waitableBuffer =
         new WaitableMetricsBuffer(buffer);
-    if (!queue.enqueue(waitableBuffer)) {
+    if (queue.enqueue(waitableBuffer)) {
+      refreshQueueSizeGauge();
+    } else {
       LOG.warn(name + " has a full queue and can't consume the given metrics.");
       dropped.incr();
       return false;
@@ -127,6 +132,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
     while (!stopping) {
       try {
         queue.consumeAll(this);
+        refreshQueueSizeGauge();
         retryDelay = firstRetryDelay;
         n = retryCount;
         inError = false;
@@ -151,12 +157,17 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
               "suppressing further error messages", e);
         }
         queue.clear();
+        refreshQueueSizeGauge();
         inError = true; // Don't keep complaining ad infinitum
       }
     }
   }
 
+  private void refreshQueueSizeGauge() {
+    qsize.set(queue.size());
+  }
+
   @Override
   public void consume(MetricsBuffer buffer) {
     long ts = 0;

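
The pattern above keeps the qsize gauge in step with the queue. A minimal
sketch of the same plumbing with the metrics2 library types
(org.apache.hadoop.metrics2.lib); the gauge name is illustrative and 'queue'
stands in for the adapter's SinkQueue:

    MetricsRegistry registry = new MetricsRegistry("sinkadapter");
    MutableGaugeInt qsize = registry.newGauge("Sink_instanceQsize",
        "current sink queue length", 0);
    // refresh after every enqueue, consumeAll and clear, as the patch does
    qsize.set(queue.size());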
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0f29926/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index dbcf0d8..6953c3b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -434,7 +434,7 @@ MetricsSystem shows the statistics for metrics snapshots and publishes. Each met
 | `Sink_`*instance*`NumOps` | Total number of sink operations for the *instance* |
 | `Sink_`*instance*`AvgTime` | Average time in 

[35/52] [abbrv] hadoop git commit: HADOOP-11612. Workaround for Curator's ChildReaper requiring Guava 15+. (rkanter)

2015-02-23 Thread zhz
HADOOP-11612. Workaround for Curator's ChildReaper requiring Guava 15+. 
(rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f013303
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f013303
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f013303

Branch: refs/heads/HDFS-7285
Commit: 6f0133039a064ca82363ac6f29fb255506f31b8a
Parents: 0d6af57
Author: Robert Kanter rkan...@apache.org
Authored: Fri Feb 20 19:47:28 2015 -0800
Committer: Robert Kanter rkan...@apache.org
Committed: Fri Feb 20 19:47:28 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/util/curator/ChildReaper.java | 234 +++
 .../hadoop/util/curator/TestChildReaper.java| 208 +
 3 files changed, 445 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f013303/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index b09868a..143692e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -981,6 +981,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11604. Prevent ConcurrentModificationException while closing domain
 sockets during shutdown of DomainSocketWatcher thread. (cnauroth)
 
+HADOOP-11612. Workaround for Curator's ChildReaper requiring Guava 15+.
+(rkanter)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f013303/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java
new file mode 100644
index 0000000..3bff187
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java
@@ -0,0 +1,234 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.util.curator;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
+import org.apache.curator.framework.recipes.locks.Reaper;
+import org.apache.curator.utils.CloseableUtils;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.utils.CloseableScheduledExecutorService;
+import org.apache.curator.utils.ThreadUtils;
+import org.apache.curator.utils.ZKPaths;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.curator.utils.PathUtils;
+
+/**
+ * This is a copy of Curator 2.7.1's ChildReaper class, modified to work with
+ * Guava 11.0.2.  The problem is the 'paths' Collection, which calls Guava's
+ * Sets.newConcurrentHashSet(), which was added in Guava 15.0.
+ * <p>
+ * Utility to reap empty child nodes of a parent node. Periodically calls getChildren on
+ * the node and adds empty nodes to an internally managed {@link Reaper}
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class ChildReaper implements Closeable
+{
+  private final Logger log = LoggerFactory.getLogger(getClass());
+  private final Reaper reaper;
+  private final AtomicReference<State> state = new AtomicReference<State>(State.LATENT);
+ 
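
The excerpt is cut off above. A usage sketch for the copied class, assuming
its public surface matches Curator 2.7.1's ChildReaper (a constructor taking
a CuratorFramework, a path and a Reaper.Mode, plus start/close):

    CuratorFramework client = CuratorFrameworkFactory.newClient(
        "localhost:2181", new RetryOneTime(1000));
    client.start();
    ChildReaper reaper = new ChildReaper(client, "/locks",
        Reaper.Mode.REAP_UNTIL_GONE);
    reaper.start();  // periodically reaps empty children of /locks
    // ... application runs ...
    CloseableUtils.closeQuietly(reaper);
    client.close();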

[11/50] [abbrv] hadoop git commit: HDFS-7711. Fix various typos in ClusterSetup.md. Contributed by Brahma Reddy Battula.

2015-02-23 Thread zhz
HDFS-7711. Fix various typos in ClusterSetup.md. Contributed by Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2fe828c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2fe828c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2fe828c

Branch: refs/heads/HDFS-EC
Commit: c2fe828cb06ebbe75e329bebf42cc9ef64a9f321
Parents: f14f01b
Author: Haohui Mai whe...@apache.org
Authored: Thu Feb 12 15:39:35 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:47 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/site/markdown/ClusterSetup.md   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2fe828c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7078d42..e891755 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -923,6 +923,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11587. TestMapFile#testMainMethodMapFile creates test files in
 hadoop-common project root. (Xiaoyu Yao via wheat9)
 
+HDFS-7711. Fix various typos in ClusterSetup.md.
+(Brahma Reddy Battula via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2fe828c/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
index 80c7d58..a8f9147 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
@@ -130,7 +130,7 @@ This section deals with important parameters to be 
specified in the given config
 | Parameter | Value | Notes |
 |: |: |: |
 | `dfs.namenode.name.dir` | Path on the local filesystem where the NameNode 
stores the namespace and transactions logs persistently. | If this is a 
comma-delimited list of directories then the name table is replicated in all of 
the directories, for redundancy. |
-| `dfs.namenode.hosts` / `dfs.namenode.hosts.exclude` | List of 
permitted/excluded DataNodes. | If necessary, use these files to control the 
list of allowable datanodes. |
+| `dfs.hosts` / `dfs.hosts.exclude` | List of permitted/excluded DataNodes. | 
If necessary, use these files to control the list of allowable datanodes. |
 | `dfs.blocksize` | 268435456 | HDFS blocksize of 256MB for large 
file-systems. |
 | `dfs.namenode.handler.count` | 100 | More NameNode server threads to handle 
RPCs from large number of DataNodes. |
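
To make the corrected parameter names concrete, here is a sketch of wiring up the include/exclude host files programmatically; the file paths are hypothetical, and the same keys can equally be set in hdfs-site.xml:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HostFileConfig {
  public static Configuration withHostFiles() {
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.hosts", "/etc/hadoop/conf/dfs.hosts");           // permitted DataNodes
    conf.set("dfs.hosts.exclude", "/etc/hadoop/conf/dfs.exclude"); // excluded DataNodes
    return conf;
  }
}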
 



[32/52] [abbrv] hadoop git commit: YARN-3237. AppLogAggregatorImpl fails to log error cause. Contributed by Rushabh S Shah

2015-02-23 Thread zhz
YARN-3237. AppLogAggregatorImpl fails to log error cause. Contributed by
Rushabh S Shah


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f56c65bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f56c65bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f56c65bb

Branch: refs/heads/HDFS-7285
Commit: f56c65bb3eb9436b67de2df63098e26589e70e56
Parents: 3c5ff07
Author: Xuan xg...@apache.org
Authored: Fri Feb 20 14:02:40 2015 -0800
Committer: Xuan xg...@apache.org
Committed: Fri Feb 20 14:02:40 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../containermanager/logaggregation/AppLogAggregatorImpl.java| 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f56c65bb/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c028043..359e647 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -312,6 +312,9 @@ Release 2.7.0 - UNRELEASED
 
 YARN-3230. Clarify application states on the web UI. (Jian He via wangda)
 
+YARN-3237. AppLogAggregatorImpl fails to log error cause.
+(Rushabh S Shah via xgong)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f56c65bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 8eb00f4..787422b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -248,7 +248,7 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
 
   } catch (IOException e1) {
LOG.error("Cannot create writer for app " + this.applicationId
-+ ". Skip log upload this time. ");
++ ". Skip log upload this time. ", e1);
 return;
   }
 
@@ -549,7 +549,7 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
 writer.append(logKey, logValue);
   } catch (Exception e) {
LOG.error("Couldn't upload logs for " + containerId
-+ ". Skipping this container.");
++ ". Skipping this container.", e);
 return new HashSet<Path>();
   }
   this.uploadedFileMeta.addAll(logValue
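
The fix is the standard commons-logging idiom: the two-argument error(Object, Throwable) overload prints the stack trace, while the one-argument form silently drops the cause. A self-contained sketch (container name illustrative):

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class LogCauseExample {
  private static final Log LOG = LogFactory.getLog(LogCauseExample.class);

  public static void main(String[] args) {
    try {
      throw new IOException("disk full");  // illustrative failure
    } catch (IOException e) {
      // Passing e as the trailing argument logs the full stack trace:
      LOG.error("Couldn't upload logs for container_42. Skipping this container.", e);
    }
  }
}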



[47/52] [abbrv] hadoop git commit: HADOOP-11534. Minor improvements for raw erasure coders ( Contributed by Kai Zheng )

2015-02-23 Thread zhz
HADOOP-11534. Minor improvements for raw erasure coders ( Contributed by Kai 
Zheng )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2dbe4c50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2dbe4c50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2dbe4c50

Branch: refs/heads/HDFS-7285
Commit: 2dbe4c508a63a85c54b631ad6ad56f05594183c7
Parents: 65dca22
Author: Vinayakumar B vinayakuma...@intel.com
Authored: Mon Feb 2 14:39:53 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 23 11:20:26 2015 -0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt   |  5 -
 .../org/apache/hadoop/io/erasurecode/ECChunk.java| 15 +--
 .../rawcoder/AbstractRawErasureCoder.java| 12 ++--
 3 files changed, 23 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dbe4c50/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 8ce5a89..2124800 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -1,4 +1,7 @@
   BREAKDOWN OF HADOOP-11264 SUBTASKS AND RELATED JIRAS (Common part of 
HDFS-7285)
 
 HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding
-(Kai Zheng via umamahesh)
\ No newline at end of file
+(Kai Zheng via umamahesh)
+
+HADOOP-11534. Minor improvements for raw erasure coders
+( Kai Zheng via vinayakumarb )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dbe4c50/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
index f84eb11..01e8f35 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -66,15 +66,26 @@ public class ECChunk {
   }
 
   /**
-   * Convert an array of this chunks to an array of byte array
+   * Convert an array of this chunks to an array of byte array.
+   * Note the chunk buffers are not affected.
* @param chunks
* @return an array of byte array
*/
   public static byte[][] toArray(ECChunk[] chunks) {
 byte[][] bytesArr = new byte[chunks.length][];
 
+ByteBuffer buffer;
 for (int i = 0; i < chunks.length; i++) {
-  bytesArr[i] = chunks[i].getBuffer().array();
+  buffer = chunks[i].getBuffer();
+  if (buffer.hasArray()) {
+bytesArr[i] = buffer.array();
+  } else {
+bytesArr[i] = new byte[buffer.remaining()];
+// Avoid affecting the original one
+buffer.mark();
+buffer.get(bytesArr[i]);
+buffer.reset();
+  }
 }
 
 return bytesArr;
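
The mark()/get()/reset() sequence above copies the remaining bytes without disturbing the source buffer's position. An equivalent approach (a sketch, not part of the patch) uses a duplicate view, which shares the content but keeps an independent position and limit:

import java.nio.ByteBuffer;

public class BufferCopy {
  public static byte[] copyRemaining(ByteBuffer buffer) {
    ByteBuffer view = buffer.duplicate();    // independent position/limit
    byte[] copy = new byte[view.remaining()];
    view.get(copy);                          // advances only the view
    return copy;
  }
}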

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dbe4c50/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
index 474542b..74d2ab6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
@@ -24,26 +24,26 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
  */
 public abstract class AbstractRawErasureCoder implements RawErasureCoder {
 
-  private int dataSize;
-  private int paritySize;
+  private int numDataUnits;
+  private int numParityUnits;
   private int chunkSize;
 
   @Override
   public void initialize(int numDataUnits, int numParityUnits,
  int chunkSize) {
-this.dataSize = numDataUnits;
-this.paritySize = numParityUnits;
+this.numDataUnits = numDataUnits;
+this.numParityUnits = numParityUnits;
 this.chunkSize = chunkSize;
   }
 
   @Override
   public int getNumDataUnits() {
-return dataSize;
+return numDataUnits;
   }
 
  @Override
  public int getNumParityUnits() {
-return paritySize;
+return numParityUnits;
  }

[16/52] [abbrv] hadoop git commit: HADOOP-11440. Use test.build.data instead of build.test.dir for testing in ClientBaseWithFixes. Contributed by Kengo Seki.

2015-02-23 Thread zhz
HADOOP-11440. Use test.build.data instead of build.test.dir for testing in 
ClientBaseWithFixes. Contributed by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18fb421f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18fb421f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18fb421f

Branch: refs/heads/HDFS-7285
Commit: 18fb421fab73789a9b692f21a99d619b5dc5c9ff
Parents: b8a14ef
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Feb 18 17:55:04 2015 -0800
Committer: Akira Ajisaka aajis...@apache.org
Committed: Wed Feb 18 17:55:04 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18fb421f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 92bd48d..7a065d5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -621,6 +621,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11521. Make connection timeout configurable in s3a.
 (Thomas Demoor via stevel)
 
+HADOOP-11440. Use test.build.data instead of build.test.dir for testing
+in ClientBaseWithFixes. (Kengo Seki via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18fb421f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
index 11d4657..7d0727a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
@@ -66,7 +66,7 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
 
public static int CONNECTION_TIMEOUT = 30000;
 static final File BASETEST =
-new File(System.getProperty("build.test.dir", "build"));
+new File(System.getProperty("test.build.data", "build"));
 
 protected final String hostPort = initHostPort();
 protected int maxCnxns = 0;
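
The renamed property is an ordinary JVM system property; a sketch of how the test base directory now resolves (falling back to ./build when -Dtest.build.data is not supplied):

import java.io.File;

public class TestDirResolution {
  public static void main(String[] args) {
    // Mirrors the line changed above in ClientBaseWithFixes.
    File baseTest = new File(System.getProperty("test.build.data", "build"));
    System.out.println(baseTest.getAbsolutePath());
  }
}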



[05/50] [abbrv] hadoop git commit: YARN-3179. Update use of Iterator to Iterable in RMAdminCLI and CommonNodeLabelsManager. Contributed by Ray Chiang

2015-02-23 Thread zhz
YARN-3179. Update use of Iterator to Iterable in RMAdminCLI and
CommonNodeLabelsManager. Contributed by Ray Chiang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2efa4076
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2efa4076
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2efa4076

Branch: refs/heads/HDFS-EC
Commit: 2efa4076b5ba07ac3c9a93fd728af39611bfbbae
Parents: eebef84
Author: Xuan xg...@apache.org
Authored: Thu Feb 12 17:31:45 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:47 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt  |  3 +++
 .../hadoop/yarn/client/cli/RMAdminCLI.java   | 19 ++-
 .../yarn/nodelabels/CommonNodeLabelsManager.java |  4 ++--
 3 files changed, 15 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2efa4076/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7117fbd..fdf13be 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -287,6 +287,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3158. Correct log messages in ResourceTrackerService.
 (Varun Saxena via xgong)
 
+YARN-3179. Update use of Iterator to Iterable in RMAdminCLI and
+CommonNodeLabelsManager. (Ray Chiang via xgong)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2efa4076/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index af83102..d29f674 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -128,12 +128,12 @@ public class RMAdminCLI extends HAAdmin {
   }
 
   private static void appendHAUsage(final StringBuilder usageBuilder) {
-for (String cmdKey : USAGE.keySet()) {
-  if (cmdKey.equals("-help")) {
+for (Map.Entry<String, UsageInfo> cmdEntry : USAGE.entrySet()) {
+  if (cmdEntry.getKey().equals("-help")) {
 continue;
   }
-  UsageInfo usageInfo = USAGE.get(cmdKey);
-  usageBuilder.append(" [" + cmdKey + " " + usageInfo.args + "]");
+  UsageInfo usageInfo = cmdEntry.getValue();
+  usageBuilder.append(" [" + cmdEntry.getKey() + " " + usageInfo.args +
+  "]");
 }
   }
 
@@ -173,14 +173,15 @@ public class RMAdminCLI extends HAAdmin {
   private static void buildUsageMsg(StringBuilder builder,
   boolean isHAEnabled) {
 builder.append("Usage: yarn rmadmin\n");
-for (String cmdKey : ADMIN_USAGE.keySet()) {
-  UsageInfo usageInfo = ADMIN_USAGE.get(cmdKey);
-  builder.append("   " + cmdKey + " " + usageInfo.args + "\n");
+for (Map.Entry<String, UsageInfo> cmdEntry : ADMIN_USAGE.entrySet()) {
+  UsageInfo usageInfo = cmdEntry.getValue();
+  builder.append("   " + cmdEntry.getKey() + " " + usageInfo.args + "\n");
 }
 if (isHAEnabled) {
-  for (String cmdKey : USAGE.keySet()) {
+  for (Map.Entry<String, UsageInfo> cmdEntry : USAGE.entrySet()) {
+String cmdKey = cmdEntry.getKey();
 if (!cmdKey.equals("-help")) {
-  UsageInfo usageInfo = USAGE.get(cmdKey);
+  UsageInfo usageInfo = cmdEntry.getValue();
   builder.append("   " + cmdKey + " " + usageInfo.args + "\n");
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2efa4076/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index 25ff417..cb6f1f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -383,8 +383,8 @@ public class CommonNodeLabelsManager 
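
The pattern behind this patch: iterating entrySet() yields key and value in one pass, whereas keySet() followed by get() performs a second hash lookup per key. A minimal before/after sketch (map contents illustrative):

import java.util.LinkedHashMap;
import java.util.Map;

public class EntrySetDemo {
  public static void main(String[] args) {
    Map<String, String> usage = new LinkedHashMap<String, String>();
    usage.put("-refreshQueues", "");

    // Before: one extra hash lookup per key inside the loop.
    for (String key : usage.keySet()) {
      System.out.println(key + " " + usage.get(key));
    }
    // After: key and value retrieved together, no extra lookup.
    for (Map.Entry<String, String> e : usage.entrySet()) {
      System.out.println(e.getKey() + " " + e.getValue());
    }
  }
}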

[02/50] [abbrv] hadoop git commit: HDFS-7322. deprecate sbin/hadoop-daemon.sh (aw)

2015-02-23 Thread zhz
HDFS-7322. deprecate sbin/hadoop-daemon.sh (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52eb87bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52eb87bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52eb87bb

Branch: refs/heads/HDFS-EC
Commit: 52eb87bb425312043317450704d5645418cca4f8
Parents: cf4b6c6
Author: Allen Wittenauer a...@apache.org
Authored: Thu Feb 12 11:51:35 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:46 2015 -0800

--
 hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 2 ++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52eb87bb/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
index b60915c..a84e0e0 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
@@ -52,5 +52,8 @@ else
  hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs"
 fi
 
+hadoop_error "WARNING: Use of this script to ${daemonmode} HDFS daemons is deprecated."
+hadoop_error "WARNING: Attempting to execute replacement \"hdfs --daemon ${daemonmode}\" instead."
+
exec "$hdfsscript" --config "${HADOOP_CONF_DIR}" --daemon "${daemonmode}" "$@"
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52eb87bb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f07da80..6e54428 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -139,6 +139,8 @@ Trunk (Unreleased)
 HDFS-316. Balancer should run for a configurable # of iterations (Xiaoyu
 Yao via aw)
 
+HDFS-7322. deprecate sbin/hadoop-daemon.sh (aw)
+
   OPTIMIZATIONS
 
   BUG FIXES



[20/50] [abbrv] hadoop git commit: HDFS-7668. Convert site documentation from apt to markdown (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a45ef2b6/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
new file mode 100644
index 000..87a9fcd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
@@ -0,0 +1,240 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+HDFS Architecture
+=
+
+* [HDFS Architecture](#HDFS_Architecture)
+* [Introduction](#Introduction)
+* [Assumptions and Goals](#Assumptions_and_Goals)
+* [Hardware Failure](#Hardware_Failure)
+* [Streaming Data Access](#Streaming_Data_Access)
+* [Large Data Sets](#Large_Data_Sets)
+* [Simple Coherency Model](#Simple_Coherency_Model)
+* [Moving Computation is Cheaper than Moving 
Data](#aMoving_Computation_is_Cheaper_than_Moving_Data)
+* [Portability Across Heterogeneous Hardware and Software 
Platforms](#Portability_Across_Heterogeneous_Hardware_and_Software_Platforms)
+* [NameNode and DataNodes](#NameNode_and_DataNodes)
+* [The File System Namespace](#The_File_System_Namespace)
+* [Data Replication](#Data_Replication)
+* [Replica Placement: The First Baby 
Steps](#Replica_Placement:_The_First_Baby_Steps)
+* [Replica Selection](#Replica_Selection)
+* [Safemode](#Safemode)
+* [The Persistence of File System 
Metadata](#The_Persistence_of_File_System_Metadata)
+* [The Communication Protocols](#The_Communication_Protocols)
+* [Robustness](#Robustness)
+* [Data Disk Failure, Heartbeats and 
Re-Replication](#Data_Disk_Failure_Heartbeats_and_Re-Replication)
+* [Cluster Rebalancing](#Cluster_Rebalancing)
+* [Data Integrity](#Data_Integrity)
+* [Metadata Disk Failure](#Metadata_Disk_Failure)
+* [Snapshots](#Snapshots)
+* [Data Organization](#Data_Organization)
+* [Data Blocks](#Data_Blocks)
+* [Staging](#Staging)
+* [Replication Pipelining](#Replication_Pipelining)
+* [Accessibility](#Accessibility)
+* [FS Shell](#FS_Shell)
+* [DFSAdmin](#DFSAdmin)
+* [Browser Interface](#Browser_Interface)
+* [Space Reclamation](#Space_Reclamation)
+* [File Deletes and Undeletes](#File_Deletes_and_Undeletes)
+* [Decrease Replication Factor](#Decrease_Replication_Factor)
+* [References](#References)
+
+Introduction
+
+
+The Hadoop Distributed File System (HDFS) is a distributed file system 
designed to run on commodity hardware. It has many similarities with existing 
distributed file systems. However, the differences from other distributed file 
systems are significant. HDFS is highly fault-tolerant and is designed to be 
deployed on low-cost hardware. HDFS provides high throughput access to 
application data and is suitable for applications that have large data sets. 
HDFS relaxes a few POSIX requirements to enable streaming access to file system 
data. HDFS was originally built as infrastructure for the Apache Nutch web 
search engine project. HDFS is part of the Apache Hadoop Core project. The 
project URL is http://hadoop.apache.org/.
+
+Assumptions and Goals
+-
+
+### Hardware Failure
+
+Hardware failure is the norm rather than the exception. An HDFS instance may 
consist of hundreds or thousands of server machines, each storing part of the 
file system’s data. The fact that there are a huge number of components and 
that each component has a non-trivial probability of failure means that some 
component of HDFS is always non-functional. Therefore, detection of faults and 
quick, automatic recovery from them is a core architectural goal of HDFS.
+
+### Streaming Data Access
+
+Applications that run on HDFS need streaming access to their data sets. They 
are not general purpose applications that typically run on general purpose file 
systems. HDFS is designed more for batch processing rather than interactive use 
by users. The emphasis is on high throughput of data access rather than low 
latency of data access. POSIX imposes many hard requirements that are not 
needed for applications that are targeted for HDFS. POSIX semantics in a few 
key areas has been traded to increase data throughput rates.
+

[19/50] [abbrv] hadoop git commit: HDFS-7668. Convert site documentation from apt to markdown (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a45ef2b6/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md
new file mode 100644
index 000..380604b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md
@@ -0,0 +1,92 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+HDFS Quotas Guide
+=
+
+* [HDFS Quotas Guide](#HDFS_Quotas_Guide)
+* [Overview](#Overview)
+* [Name Quotas](#Name_Quotas)
+* [Space Quotas](#Space_Quotas)
+* [Administrative Commands](#Administrative_Commands)
+* [Reporting Command](#Reporting_Command)
+
+Overview
+
+
+The Hadoop Distributed File System (HDFS) allows the administrator to set 
quotas for the number of names used and the amount of space used for individual 
directories. Name quotas and space quotas operate independently, but the 
administration and implementation of the two types of quotas are closely 
parallel.
+
+Name Quotas
+---
+
+The name quota is a hard limit on the number of file and directory names in 
the tree rooted at that directory. File and directory creations fail if the 
quota would be exceeded. Quotas stick with renamed directories; the rename 
operation fails if operation would result in a quota violation. The attempt to 
set a quota will still succeed even if the directory would be in violation of 
the new quota. A newly created directory has no associated quota. The largest 
quota is Long.Max\_Value. A quota of one forces a directory to remain empty. 
(Yes, a directory counts against its own quota!)
+
+Quotas are persistent with the fsimage. When starting, if the fsimage is 
immediately in violation of a quota (perhaps the fsimage was surreptitiously 
modified), a warning is printed for each of such violations. Setting or 
removing a quota creates a journal entry.
+
+Space Quotas
+
+
+The space quota is a hard limit on the number of bytes used by files in the 
tree rooted at that directory. Block allocations fail if the quota would not 
allow a full block to be written. Each replica of a block counts against the 
quota. Quotas stick with renamed directories; the rename operation fails if the 
operation would result in a quota violation. A newly created directory has no 
associated quota. The largest quota is `Long.Max_Value`. A quota of zero still 
permits files to be created, but no blocks can be added to the files. 
Directories don't use host file system space and don't count against the space 
quota. The host file system space used to save the file meta data is not 
counted against the quota. Quotas are charged at the intended replication 
factor for the file; changing the replication factor for a file will credit or 
debit quotas.
+
+Quotas are persistent with the fsimage. When starting, if the fsimage is 
immediately in violation of a quota (perhaps the fsimage was surreptitiously 
modified), a warning is printed for each of such violations. Setting or 
removing a quota creates a journal entry.
+
+Administrative Commands
+---
+
+Quotas are managed by a set of commands available only to the administrator.
+
+*   `hdfs dfsadmin -setQuota <N> <directory>...<directory>`
+
+Set the name quota to be N for each directory. Best effort for each
+directory, with faults reported if N is not a positive long
+integer, the directory does not exist or it is a file, or the
+directory would immediately exceed the new quota.
+
+*   `hdfs dfsadmin -clrQuota <directory>...<directory>`
+
+Remove any name quota for each directory. Best effort for each
+directory, with faults reported if the directory does not exist or
+it is a file. It is not a fault if the directory has no quota.
+
+*   `hdfs dfsadmin -setSpaceQuota <N> <directory>...<directory>`
+
+Set the space quota to be N bytes for each directory. This is a
+hard limit on total size of all the files under the directory tree.
+The space quota takes replication also into account, i.e. one GB of
+data with replication of 3 consumes 3GB of quota. N can also be
+specified with a binary prefix for convenience, for e.g. 50g for 50
+gigabytes and 2t for 2 terabytes etc. Best effort 
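
For completeness, the dfsadmin commands above have a programmatic counterpart on DistributedFileSystem; a sketch assuming the setQuota(Path, long, long) API and an HDFS default filesystem, with a hypothetical path and limits (the space quota is raw bytes, i.e. after replication):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class QuotaExample {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS cluster.
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // Name quota of 10000 names, space quota of 50 GB on /user/alice:
    dfs.setQuota(new Path("/user/alice"), 10000, 50L * 1024 * 1024 * 1024);
  }
}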

[13/50] [abbrv] hadoop git commit: YARN-2994. Document work-preserving RM restart. Contributed by Jian He.

2015-02-23 Thread zhz
YARN-2994. Document work-preserving RM restart. Contributed by Jian He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb58025c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb58025c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb58025c

Branch: refs/heads/HDFS-EC
Commit: eb58025cadf8bf82963f99b64a7352a2e6b45f2f
Parents: a45ef2b
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Fri Feb 13 13:08:13 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:48 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../src/site/apt/ResourceManagerRestart.apt.vm  | 182 ++-
 2 files changed, 138 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb58025c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 41e5411..622072f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -83,6 +83,8 @@ Release 2.7.0 - UNRELEASED
  YARN-2616 [YARN-913] Add CLI client to the registry to list, view
  and manipulate entries. (Akshay Radia via stevel)
 
+YARN-2994. Document work-preserving RM restart. (Jian He via ozawa)
+
   IMPROVEMENTS
 
 YARN-3005. [JDK7] Use switch statement for String instead of if-else

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb58025c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRestart.apt.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRestart.apt.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRestart.apt.vm
index 30a3a64..a08c19d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRestart.apt.vm
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRestart.apt.vm
@@ -11,12 +11,12 @@
 ~~ limitations under the License. See accompanying LICENSE file.
 
   ---
-  ResourceManger Restart
+  ResourceManager Restart
   ---
   ---
   ${maven.build.timestamp}
 
-ResourceManger Restart
+ResourceManager Restart
 
 %{toc|section=1|fromDepth=0}
 
@@ -32,23 +32,26 @@ ResourceManger Restart
 
   ResourceManager Restart feature is divided into two phases:
 
-  ResourceManager Restart Phase 1: Enhance RM to persist application/attempt 
state
+  ResourceManager Restart Phase 1 (Non-work-preserving RM restart):
+  Enhance RM to persist application/attempt state
   and other credentials information in a pluggable state-store. RM will reload
   this information from state-store upon restart and re-kick the previously
   running applications. Users are not required to re-submit the applications.
 
-  ResourceManager Restart Phase 2:
-  Focus on re-constructing the running state of ResourceManger by reading back
-  the container statuses from NodeMangers and container requests from 
ApplicationMasters
+  ResourceManager Restart Phase 2 (Work-preserving RM restart):
+  Focus on re-constructing the running state of ResourceManager by combining
+  the container statuses from NodeManagers and container requests from 
ApplicationMasters
   upon restart. The key difference from phase 1 is that previously running 
applications
   will not be killed after RM restarts, and so applications won't lose its work
   because of RM outage.
 
+* {Feature}
+
+** Phase 1: Non-work-preserving RM restart
+
   As of Hadoop 2.4.0 release, only ResourceManager Restart Phase 1 is 
implemented which
   is described below.
 
-* {Feature}
-
   The overall concept is that RM will persist the application metadata
   (i.e. ApplicationSubmissionContext) in
   a pluggable state-store when client submits an application and also saves 
the final status
@@ -62,13 +65,13 @@ ResourceManger Restart
   applications if they were already completed (i.e. failed, killed, finished)
   before RM went down.
 
-  NodeMangers and clients during the down-time of RM will keep polling RM 
until 
+  NodeManagers and clients during the down-time of RM will keep polling RM 
until 
   RM comes up. When RM becomes alive, it will send a re-sync command to
-  all the NodeMangers and ApplicationMasters it was talking to via heartbeats.
-  Today, the behaviors for NodeMangers and ApplicationMasters to handle this 
command
+  all the NodeManagers and ApplicationMasters it was talking to via heartbeats.
+  As of Hadoop 2.4.0 release, the behaviors for NodeManagers and 
ApplicationMasters to handle this command
   are: NMs will kill all its managed containers and re-register with RM. From 
the
   RM's perspective, these 
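
Enabling either restart phase starts from the same two configuration keys; a minimal sketch (keys assumed from yarn-default.xml; the ZooKeeper-backed store shown is one of several pluggable choices):

import org.apache.hadoop.conf.Configuration;

public class RMRestartConfig {
  public static Configuration enableRecovery() {
    Configuration conf = new Configuration();
    conf.setBoolean("yarn.resourcemanager.recovery.enabled", true);
    conf.set("yarn.resourcemanager.store.class",
        "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore");
    return conf;
  }
}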

[47/50] [abbrv] hadoop git commit: YARN-1299. Improve a log message in AppSchedulingInfo by adding application id. Contributed by Ashutosh Jindal and Devaraj K.

2015-02-23 Thread zhz
YARN-1299. Improve a log message in AppSchedulingInfo by adding application id. 
Contributed by Ashutosh Jindal and Devaraj K.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c07f1de5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c07f1de5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c07f1de5

Branch: refs/heads/HDFS-EC
Commit: c07f1de586bd2fd05f3668a6288213013c80f881
Parents: 561d2ce
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Feb 17 01:48:22 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:51 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java  | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07f1de5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7161dc3..66543cd 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -298,6 +298,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3203. Correct a log message in AuxServices. (Brahma Reddy Battula 
 via ozawa)
 
+YARN-1299. Improve a log message in AppSchedulingInfo by adding 
application 
+id. (Ashutosh Jindal and Devaraj K via ozawa)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c07f1de5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 3ade7f7..a9a459f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -176,7 +176,8 @@ public class AppSchedulingInfo {
 
 // Similarly, deactivate application?
 if (request.getNumContainers() <= 0) {
-  LOG.info("checking for deactivate... ");
+  LOG.info("checking for deactivate of application :"
+  + this.applicationId);
   checkForDeactivation();
 }
 



[03/50] [abbrv] hadoop git commit: YARN-2079. Recover NonAggregatingLogHandler state upon nodemanager restart. (Contributed by Jason Lowe)

2015-02-23 Thread zhz
YARN-2079. Recover NonAggregatingLogHandler state upon nodemanager restart. 
(Contributed by Jason Lowe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf4b6c69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf4b6c69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf4b6c69

Branch: refs/heads/HDFS-EC
Commit: cf4b6c69c551469acc5055cebe1a25fa2c8cd9ae
Parents: 25fe4d9
Author: Junping Du junping...@apache.org
Authored: Thu Feb 12 11:46:47 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:46 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../containermanager/ContainerManagerImpl.java  |  4 +-
 .../loghandler/NonAggregatingLogHandler.java| 63 ++--
 .../recovery/NMLeveldbStateStoreService.java| 67 -
 .../recovery/NMNullStateStoreService.java   | 16 
 .../recovery/NMStateStoreService.java   | 35 +
 .../yarn_server_nodemanager_recovery.proto  |  4 +
 .../TestNonAggregatingLogHandler.java   | 79 ++--
 .../recovery/NMMemoryStateStoreService.java | 79 +---
 .../TestNMLeveldbStateStoreService.java | 51 +
 10 files changed, 362 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf4b6c69/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 47ccbe9..d1b684e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -274,6 +274,9 @@ Release 2.7.0 - UNRELEASED
 
 YARN-3147. Clean up RM web proxy code. (Steve Loughran via xgong)
 
+YARN-2079. Recover NonAggregatingLogHandler state upon nodemanager
+restart. (Jason Lowe via junping_du) 
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf4b6c69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index bb277d9..acac600 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -135,7 +135,6 @@ import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.Re
 import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus;
 import 
org.apache.hadoop.yarn.server.nodemanager.security.authorize.NMPolicyProvider;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
-import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -362,7 +361,8 @@ public class ContainerManagerImpl extends CompositeService 
implements
   deletionService, dirsHandler);
 } else {
   return new NonAggregatingLogHandler(this.dispatcher, deletionService,
-  dirsHandler);
+  dirsHandler,
+  context.getNMStateStore());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf4b6c69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
index 

[33/52] [abbrv] hadoop git commit: HDFS-7740. Test truncate with DataNodes restarting. (yliu)

2015-02-23 Thread zhz
HDFS-7740. Test truncate with DataNodes restarting. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/737bad02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/737bad02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/737bad02

Branch: refs/heads/HDFS-7285
Commit: 737bad02d4cf879fa7d20b7c0e083d9dc59f604c
Parents: 6f01330
Author: yliu y...@apache.org
Authored: Sat Feb 21 06:32:34 2015 +0800
Committer: yliu y...@apache.org
Committed: Sat Feb 21 06:32:34 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hdfs/server/namenode/TestFileTruncate.java  | 221 +++
 2 files changed, 223 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/737bad02/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c47b89d..c8b6610 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -666,6 +666,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7773. Additional metrics in HDFS to be accessed via jmx.
 (Anu Engineer via cnauroth)
 
+HDFS-7740. Test truncate with DataNodes restarting. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/737bad02/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 253727d..19b5cde 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -93,6 +94,8 @@ public class TestFileTruncate {
 conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
 conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
+conf.setLong(
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
 cluster = new MiniDFSCluster.Builder(conf)
 .format(true)
 .numDataNodes(DATANODE_NUM)
@@ -623,6 +626,224 @@ public class TestFileTruncate {
   }
 
   /**
+   * The last block is truncated at mid. (non copy-on-truncate)
+   * dn0 is shutdown before truncate and restart after truncate successful.
+   */
+  @Test(timeout=60000)
+  public void testTruncateWithDataNodesRestart() throws Exception {
+int startingFileSize = 3 * BLOCK_SIZE;
+byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
+final Path parent = new Path("/test");
+final Path p = new Path(parent, "testTruncateWithDataNodesRestart");
+
+writeContents(contents, startingFileSize, p);
+LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
+
+int dn = 0;
+int toTruncateLength = 1;
+int newLength = startingFileSize - toTruncateLength;
+cluster.getDataNodes().get(dn).shutdown();
+try {
+  boolean isReady = fs.truncate(p, newLength);
+  assertFalse(isReady);
+} finally {
+  cluster.restartDataNode(dn);
+  cluster.waitActive();
+  cluster.triggerBlockReports();
+}
+
+LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
+/*
+ * For non copy-on-truncate, the truncated block id is the same, but the 
+ * GS should increase.
+ * We trigger block report for dn0 after it restarts, since the GS 
+ * of replica for the last block on it is old, so the reported last block
+ * from dn0 should be marked corrupt on nn and the replicas of last block 
+ * on nn should decrease 1, then the truncated block will be replicated 
+ * to dn0.
+ */
+assertEquals(newBlock.getBlock().getBlockId(), 
+oldBlock.getBlock().getBlockId());
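
The comment above implies a companion check on the generation stamp; the test continues beyond this excerpt, but in its context the assertion would look like this (sketch only, not quoted from the patch):

assertTrue(newBlock.getBlock().getGenerationStamp() >
    oldBlock.getBlock().getGenerationStamp());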
+

[14/52] [abbrv] hadoop git commit: HDFS-7656. Expose truncate API for HDFS httpfs. (yliu)

2015-02-23 Thread zhz
HDFS-7656. Expose truncate API for HDFS httpfs. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fd02afe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fd02afe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fd02afe

Branch: refs/heads/HDFS-7285
Commit: 2fd02afeca3710f487b6a039a65c1a666322b229
Parents: 64a8375
Author: yliu y...@apache.org
Authored: Thu Feb 19 08:36:31 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Feb 19 08:36:31 2015 +0800

--
 .../hadoop/fs/http/client/HttpFSFileSystem.java | 24 ++-
 .../hadoop/fs/http/server/FSOperations.java | 43 +++-
 .../http/server/HttpFSParametersProvider.java   | 20 +
 .../hadoop/fs/http/server/HttpFSServer.java | 10 +
 .../fs/http/client/BaseTestHttpFSWith.java  | 40 --
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 6 files changed, 133 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd02afe/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 5b079e9..20b212e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -109,12 +109,15 @@ public class HttpFSFileSystem extends FileSystem
   public static final String XATTR_VALUE_PARAM = "xattr.value";
   public static final String XATTR_SET_FLAG_PARAM = "flag";
   public static final String XATTR_ENCODING_PARAM = "encoding";
+  public static final String NEW_LENGTH_PARAM = "newlength";
 
   public static final Short DEFAULT_PERMISSION = 0755;
   public static final String ACLSPEC_DEFAULT = "";
 
   public static final String RENAME_JSON = "boolean";
 
+  public static final String TRUNCATE_JSON = "boolean";
+
   public static final String DELETE_JSON = "boolean";
 
   public static final String MKDIRS_JSON = "boolean";
@@ -191,7 +194,7 @@ public class HttpFSFileSystem extends FileSystem
 GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
 GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),
 INSTRUMENTATION(HTTP_GET), GETACLSTATUS(HTTP_GET),
-APPEND(HTTP_POST), CONCAT(HTTP_POST),
+APPEND(HTTP_POST), CONCAT(HTTP_POST), TRUNCATE(HTTP_POST),
 CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
 SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
 MODIFYACLENTRIES(HTTP_PUT), REMOVEACLENTRIES(HTTP_PUT),
@@ -568,6 +571,25 @@ public class HttpFSFileSystem extends FileSystem
   }
 
   /**
+   * Truncate a file.
+   * 
+   * @param f the file to be truncated.
+   * @param newLength The size the file is to be truncated to.
+   *
+   * @throws IOException
+   */
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+Map<String, String> params = new HashMap<String, String>();
+params.put(OP_PARAM, Operation.TRUNCATE.toString());
+params.put(NEW_LENGTH_PARAM, Long.toString(newLength));
+HttpURLConnection conn = getConnection(Operation.TRUNCATE.getMethod(),
+params, f, true);
+JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+return (Boolean) json.get(TRUNCATE_JSON);
+  }
+
+  /**
* Concat existing files together.
* @param f the path to the target destination.
* @param psrcs the paths to the sources to use for the concatenation.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd02afe/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 4b72a51..bc290a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -364,7 +364,7 @@ public class FSOperations {
   }
 
   /**
-   * Executor that performs an append FileSystemAccess files system operation.
+   * Executor that performs a concat FileSystemAccess files system operation.
*/
   @InterfaceAudience.Private
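
From the client side, the new operation is reachable through the ordinary FileSystem API once the filesystem points at an HttpFS endpoint; a sketch with a hypothetical host and file, assuming the endpoint speaks the webhdfs REST API shown above (HttpFS serves it on port 14000 by default, posting op=TRUNCATE with the newlength parameter):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HttpFSTruncateClient {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://httpfs-host:14000"), new Configuration());
    boolean done = fs.truncate(new Path("/user/alice/data.log"), 1024L);
    // false means block recovery is still in progress on the last block.
    System.out.println("truncated in place: " + done);
  }
}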
   

[28/52] [abbrv] hadoop git commit: HDFS-7814. Fix usage string of storageType parameter for dfsadmin -setSpaceQuota/clrSpaceQuota. Contributed by Xiaoyu Yao.

2015-02-23 Thread zhz
HDFS-7814. Fix usage string of storageType parameter for dfsadmin 
-setSpaceQuota/clrSpaceQuota. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c6ae0d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c6ae0d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c6ae0d6

Branch: refs/heads/HDFS-7285
Commit: 8c6ae0d6199efa327d8f2761f2ad2163a60e5508
Parents: ce5bf92
Author: cnauroth cnaur...@apache.org
Authored: Fri Feb 20 12:21:46 2015 -0800
Committer: cnauroth cnaur...@apache.org
Committed: Fri Feb 20 12:21:46 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java | 8 
 .../hadoop-hdfs/src/test/resources/testHDFSConf.xml  | 4 ++--
 3 files changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c6ae0d6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 71ce48f..7d9d0ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -995,6 +995,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7788. Post-2.6 namenode may not start up with an image containing
 inodes created with an old release. (Rushabh Shah via kihwal)
 
+HDFS-7814. Fix usage string of storageType parameter for
+dfsadmin -setSpaceQuota/clrSpaceQuota. (Xiaoyu Yao via cnauroth)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c6ae0d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index ae9088c..6ba778f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -207,9 +207,9 @@ public class DFSAdmin extends FsShell {
   /** A class that supports command clearSpaceQuota */
   private static class ClearSpaceQuotaCommand extends DFSAdminCommand {
 private static final String NAME = "clrSpaceQuota";
-private static final String USAGE = "-"+NAME+" <dirname>...<dirname> -storageType <storagetype>";
+private static final String USAGE = "-"+NAME+" [-storageType <storagetype>] <dirname>...<dirname>";
 private static final String DESCRIPTION = USAGE + ": " +
-"Clear the disk space quota for each directory <dirName>.\n" +
+"Clear the space quota for each directory <dirName>.\n" +
 "\t\tFor each directory, attempt to clear the quota. An error will be reported if\n" +
 "\t\t1. the directory does not exist or is a file, or\n" +
 "\t\t2. user is not an administrator.\n" +
@@ -259,9 +259,9 @@ public class DFSAdmin extends FsShell {
   private static class SetSpaceQuotaCommand extends DFSAdminCommand {
 private static final String NAME = "setSpaceQuota";
 private static final String USAGE =
-  "-"+NAME+" <quota> <dirname>...<dirname> -storageType <storagetype>";
+  "-"+NAME+" <quota> [-storageType <storagetype>] <dirname>...<dirname>";
 private static final String DESCRIPTION = USAGE + ": " +
-  "Set the disk space quota <quota> for each directory <dirName>.\n" +
+  "Set the space quota <quota> for each directory <dirName>.\n" +
   "\t\tThe space quota is a long integer that puts a hard limit\n" +
   "\t\ton the total size of all the files under the directory tree.\n" +
   "\t\tThe extra space required for replication is also counted. E.g.\n" +
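
The corrected usage maps onto the per-storage-type quota API added under HDFS-7584; a sketch assuming DistributedFileSystem#setQuotaByStorageType and the relocated org.apache.hadoop.fs.StorageType (path and size hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class StorageTypeQuota {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // CLI equivalent: hdfs dfsadmin -setSpaceQuota 10g -storageType SSD /data/hot
    dfs.setQuotaByStorageType(new Path("/data/hot"), StorageType.SSD,
        10L * 1024 * 1024 * 1024);
  }
}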

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c6ae0d6/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 935cd58..2d3de1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -15711,7 +15711,7 @@
   <comparators>
 <comparator>
   <type>RegexpComparator</type>
-  <expected-output>^-setSpaceQuota &lt;quota&gt; &lt;dirname&gt;...&lt;dirname&gt; -storageType &lt;storagetype&gt;: Set the disk space quota &lt;quota&gt; for each directory &lt;dirName&gt;.( )*</expected-output>
+

[45/52] [abbrv] hadoop git commit: Fix Compilation Error in TestAddBlockgroup.java after the merge

2015-02-23 Thread zhz
Fix Compilation Error in TestAddBlockgroup.java after the merge


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08a9ac32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08a9ac32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08a9ac32

Branch: refs/heads/HDFS-7285
Commit: 08a9ac3227286e953afdb01a93a42adce8fdd6fe
Parents: 6caf9c7
Author: Jing Zhao ji...@apache.org
Authored: Sun Feb 8 16:01:03 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 23 11:19:59 2015 -0800

--
 .../apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08a9ac32/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
index 95133ce..06dfade 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -75,7 +75,7 @@ public class TestAddBlockgroup {
 final Path file1 = new Path(/file1);
 DFSTestUtil.createFile(fs, file1, BLOCKSIZE * 2, REPLICATION, 0L);
 INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
-BlockInfo[] file1Blocks = file1Node.getBlocks();
+BlockInfoContiguous[] file1Blocks = file1Node.getBlocks();
 assertEquals(2, file1Blocks.length);
 assertEquals(GROUP_SIZE, file1Blocks[0].numNodes());
 assertEquals(HdfsConstants.MAX_BLOCKS_IN_GROUP,



[34/52] [abbrv] hadoop git commit: HDFS-7813. TestDFSHAAdminMiniCluster#testFencer testcase is failing frequently. Contributed by Rakesh R.

2015-02-23 Thread zhz
HDFS-7813. TestDFSHAAdminMiniCluster#testFencer testcase is failing frequently. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d6af574
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d6af574
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d6af574

Branch: refs/heads/HDFS-7285
Commit: 0d6af574e0056fc627461eb91ed0c365b026b470
Parents: f56c65b
Author: cnauroth cnaur...@apache.org
Authored: Fri Feb 20 17:01:08 2015 -0800
Committer: cnauroth cnaur...@apache.org
Committed: Fri Feb 20 17:01:08 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java   | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6af574/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5c472a8..c47b89d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1001,6 +1001,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7814. Fix usage string of storageType parameter for
 dfsadmin -setSpaceQuota/clrSpaceQuota. (Xiaoyu Yao via cnauroth)
 
+HDFS-7813. TestDFSHAAdminMiniCluster#testFencer testcase is failing
+frequently. (Rakesh R via cnauroth)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6af574/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
index b4d45c3..9047279 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
@@ -155,6 +155,7 @@ public class TestDFSHAAdminMiniCluster {
     tool.setConf(conf);
     assertEquals(0, runTool("-transitionToActive", "nn1"));
     assertEquals(0, runTool("-failover", "nn1", "nn2"));
+    assertEquals(0, runTool("-failover", "nn2", "nn1"));
 
     // Fencer has not run yet, since none of the above required fencing 
     assertEquals("", Files.toString(tmpFile, Charsets.UTF_8));



[08/50] [abbrv] hadoop git commit: YARN-3124. Fixed CS LeafQueue/ParentQueue to use QueueCapacities to track capacities-by-label. Contributed by Wangda Tan

2015-02-23 Thread zhz
YARN-3124. Fixed CS LeafQueue/ParentQueue to use QueueCapacities to track 
capacities-by-label. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f14f01be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f14f01be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f14f01be

Branch: refs/heads/HDFS-EC
Commit: f14f01bebe74fc4a83e878778f6b51d2d9e18d36
Parents: 27447c5
Author: Jian He jia...@apache.org
Authored: Thu Feb 12 14:58:09 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:47 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../dev-support/findbugs-exclude.xml|   2 +
 .../scheduler/capacity/AbstractCSQueue.java | 240 ++-
 .../scheduler/capacity/CSQueue.java |  49 +---
 .../scheduler/capacity/CSQueueUtils.java| 160 +
 .../CapacitySchedulerConfiguration.java |  73 ++
 .../scheduler/capacity/LeafQueue.java   | 196 ---
 .../scheduler/capacity/ParentQueue.java |  92 ++-
 .../scheduler/capacity/PlanQueue.java   |  13 +-
 .../scheduler/capacity/QueueCapacities.java |  68 +-
 .../scheduler/capacity/ReservationQueue.java|  10 +-
 .../scheduler/capacity/TestCSQueueUtils.java|  24 +-
 .../capacity/TestCapacityScheduler.java |   6 +-
 .../TestCapacitySchedulerNodeLabelUpdate.java   |   3 +-
 .../scheduler/capacity/TestQueueCapacities.java |   2 +-
 .../scheduler/capacity/TestQueueParsing.java|  14 +-
 .../webapp/TestRMWebServicesCapacitySched.java  |   4 +-
 17 files changed, 412 insertions(+), 547 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f14f01be/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8bc614d..b91281e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -281,6 +281,9 @@ Release 2.7.0 - UNRELEASED
 
 YARN-3181. FairScheduler: Fix up outdated findbugs issues. (kasha)
 
+YARN-3124. Fixed CS LeafQueue/ParentQueue to use QueueCapacities to track
+capacities-by-label. (Wangda Tan via jianhe)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f14f01be/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 09a9d2e..70f1a71 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -194,6 +194,8 @@
       <Field name="absoluteNodeLabelCapacities" />
       <Field name="reservationsContinueLooking" />
       <Field name="absoluteCapacityByNodeLabels" />
+      <Field name="authorizer" />
+      <Field name="parent" />
     </Or>
     <Bug pattern="IS2_INCONSISTENT_SYNC" />
   </Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f14f01be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 753fb14..eb7218b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -44,23 +44,15 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Sets;
 
 public abstract class AbstractCSQueue implements CSQueue {
   
   CSQueue parent;
   final String queueName;
-  float capacity;
-  float 

[04/52] [abbrv] hadoop git commit: HADOOP-11593. Convert site documentation from apt to markdown (stragglers) (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
HADOOP-11593. Convert site documentation from apt to markdown (stragglers) 
(Masatake Iwasaki via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6fc1f3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6fc1f3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6fc1f3e

Branch: refs/heads/HDFS-7285
Commit: b6fc1f3e4355be913b7d4f6ccd48c0c26b66d039
Parents: 7c78204
Author: Allen Wittenauer a...@apache.org
Authored: Tue Feb 17 21:30:24 2015 -1000
Committer: Allen Wittenauer a...@apache.org
Committed: Tue Feb 17 21:30:24 2015 -1000

--
 .../hadoop-auth/src/site/apt/BuildingIt.apt.vm  |   70 --
 .../src/site/apt/Configuration.apt.vm   |  377 ---
 .../hadoop-auth/src/site/apt/Examples.apt.vm|  133 ---
 .../hadoop-auth/src/site/apt/index.apt.vm   |   59 -
 .../hadoop-auth/src/site/markdown/BuildingIt.md |   56 +
 .../src/site/markdown/Configuration.md  |  341 ++
 .../hadoop-auth/src/site/markdown/Examples.md   |  109 ++
 .../hadoop-auth/src/site/markdown/index.md  |   43 +
 hadoop-common-project/hadoop-common/CHANGES.txt |3 +
 .../hadoop-kms/src/site/apt/index.apt.vm| 1020 --
 .../hadoop-kms/src/site/markdown/index.md.vm|  864 +++
 hadoop-project/src/site/apt/index.apt.vm|   73 --
 hadoop-project/src/site/markdown/index.md.vm|   72 ++
 .../hadoop-openstack/src/site/apt/index.apt.vm  |  686 
 .../hadoop-openstack/src/site/markdown/index.md |  544 ++
 .../src/site/resources/css/site.css |   30 +
 .../src/site/apt/SchedulerLoadSimulator.apt.vm  |  439 
 .../src/site/markdown/SchedulerLoadSimulator.md |  357 ++
 .../src/site/apt/HadoopStreaming.apt.vm |  792 --
 .../src/site/markdown/HadoopStreaming.md.vm |  559 ++
 20 files changed, 2978 insertions(+), 3649 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc1f3e/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm
--
diff --git a/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm 
b/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm
deleted file mode 100644
index 2ca2f0a..000
--- a/hadoop-common-project/hadoop-auth/src/site/apt/BuildingIt.apt.vm
+++ /dev/null
@@ -1,70 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It
-  ---
-  ---
-  ${maven.build.timestamp}
-
-Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It
-
-* Requirements
-
-  * Java 6+
-
-  * Maven 3+
-
-  * Kerberos KDC (for running Kerberos test cases)
-
-* Building
-
-  Use Maven goals: clean, test, compile, package, install
-
-  Available profiles: docs, testKerberos
-
-* Testing
-
-  By default Kerberos testcases are not run.
-
-  The requirements to run Kerberos testcases are a running KDC, a keytab
-  file with a client principal and a kerberos principal.
-
-  To run Kerberos testcases use the testKerberos Maven profile:
-
-+---+
-$ mvn test -PtestKerberos
-+---+
-
-  The following Maven -D options can be used to change the default
-  values:
-
-  * hadoop-auth.test.kerberos.realm: default value LOCALHOST
-
-  * hadoop-auth.test.kerberos.client.principal: default value client
-
-  * hadoop-auth.test.kerberos.server.principal: default value
-HTTP/localhost (it must start 'HTTP/')
-
-  * hadoop-auth.test.kerberos.keytab.file: default value
-${HOME}/${USER}.keytab
-
-** Generating Documentation
-
-  To create the documentation use the docs Maven profile:
-
-+---+
-$ mvn package -Pdocs
-+---+
-
-  The generated documentation is available at
-  hadoop-auth/target/site/.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc1f3e/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm 
b/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
deleted file mode 100644
index 88248e5..000
--- a/hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
+++ /dev/null

[48/52] [abbrv] hadoop git commit: HADOOP-11541. Raw XOR coder

2015-02-23 Thread zhz
HADOOP-11541. Raw XOR coder


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14248add
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14248add
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14248add

Branch: refs/heads/HDFS-7285
Commit: 14248add59531523481b66f4b1b8ee456117b937
Parents: 2dbe4c5
Author: Kai Zheng dran...@apache.org
Authored: Sun Feb 8 01:40:27 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 23 11:20:31 2015 -0800

--
 .../io/erasurecode/rawcoder/XorRawDecoder.java  |  81 ++
 .../io/erasurecode/rawcoder/XorRawEncoder.java  |  61 +
 .../hadoop/io/erasurecode/TestCoderBase.java| 262 +++
 .../erasurecode/rawcoder/TestRawCoderBase.java  |  96 +++
 .../erasurecode/rawcoder/TestXorRawCoder.java   |  52 
 5 files changed, 552 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14248add/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
new file mode 100644
index 000..98307a7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A raw decoder in XOR code scheme in pure Java, adapted from HDFS-RAID.
+ */
+public class XorRawDecoder extends AbstractRawErasureDecoder {
+
+  @Override
+  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
+      ByteBuffer[] outputs) {
+    assert(erasedIndexes.length == outputs.length);
+    assert(erasedIndexes.length <= 1);
+
+    int bufSize = inputs[0].remaining();
+    int erasedIdx = erasedIndexes[0];
+
+    // Set the output to zeros.
+    for (int j = 0; j < bufSize; j++) {
+      outputs[0].put(j, (byte) 0);
+    }
+
+    // Process the inputs.
+    for (int i = 0; i < inputs.length; i++) {
+      // Skip the erased location.
+      if (i == erasedIdx) {
+        continue;
+      }
+
+      for (int j = 0; j < bufSize; j++) {
+        outputs[0].put(j, (byte) (outputs[0].get(j) ^ inputs[i].get(j)));
+      }
+    }
+  }
+
+  @Override
+  protected void doDecode(byte[][] inputs, int[] erasedIndexes,
+      byte[][] outputs) {
+    assert(erasedIndexes.length == outputs.length);
+    assert(erasedIndexes.length <= 1);
+
+    int bufSize = inputs[0].length;
+    int erasedIdx = erasedIndexes[0];
+
+    // Set the output to zeros.
+    for (int j = 0; j < bufSize; j++) {
+      outputs[0][j] = 0;
+    }
+
+    // Process the inputs.
+    for (int i = 0; i < inputs.length; i++) {
+      // Skip the erased location.
+      if (i == erasedIdx) {
+        continue;
+      }
+
+      for (int j = 0; j < bufSize; j++) {
+        outputs[0][j] ^= inputs[i][j];
+      }
+    }
+  }
+
+}

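For context, the decoder above can repair at most one erasure because XOR parity contributes a single linear equation: the missing unit is simply the XOR of every surviving unit. A self-contained sketch of that invariant (plain Java, independent of the Hadoop classes in this patch):

    import java.util.Arrays;

    public class XorInvariantDemo {
      public static void main(String[] args) {
        byte[] d0 = {1, 2, 3};
        byte[] d1 = {4, 5, 6};
        byte[] parity = new byte[3];
        for (int j = 0; j < 3; j++) {
          parity[j] = (byte) (d0[j] ^ d1[j]);          // encode: p = d0 ^ d1
        }
        byte[] recovered = new byte[3];
        for (int j = 0; j < 3; j++) {
          recovered[j] = (byte) (parity[j] ^ d0[j]);   // decode: d1 = p ^ d0
        }
        System.out.println(Arrays.equals(recovered, d1));  // true
      }
    }

Losing two units would leave two unknowns in one equation, which is why the decoder asserts erasedIndexes.length <= 1.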
http://git-wip-us.apache.org/repos/asf/hadoop/blob/14248add/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
new file mode 100644
index 000..99b20b9
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the 

[10/52] [abbrv] hadoop git commit: HDFS-7772. Document hdfs balancer -exclude/-include option in HDFSCommands.html. Contributed by Xiaoyu Yao.

2015-02-23 Thread zhz
HDFS-7772. Document hdfs balancer -exclude/-include option in 
HDFSCommands.html. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2aa9979a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2aa9979a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2aa9979a

Branch: refs/heads/HDFS-7285
Commit: 2aa9979a713ab79853885264ad7739c48226aaa4
Parents: f5da556
Author: cnauroth cnaur...@apache.org
Authored: Wed Feb 18 11:46:57 2015 -0800
Committer: cnauroth cnaur...@apache.org
Committed: Wed Feb 18 12:03:07 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  3 +++
 .../hadoop/hdfs/server/balancer/Balancer.java  |  7 +++
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md  | 17 +
 3 files changed, 19 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2aa9979a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ec1c837..70eae1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -653,6 +653,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7804. correct the haadmin command usage in 
#HDFSHighAvailabilityWithQJM.html
 (Brahma Reddy Battula via umamahesh)
 
+HDFS-7772. Document hdfs balancer -exclude/-include option in
+HDFSCommands.html (Xiaoyu Yao via cnauroth)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2aa9979a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 5b87cb5..71338e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -170,15 +170,14 @@ public class Balancer {
   private static final long GB = 1L << 30; //1GB
   private static final long MAX_SIZE_TO_MOVE = 10*GB;
 
-  private static final String USAGE = "Usage: java "
-      + Balancer.class.getSimpleName()
+  private static final String USAGE = "Usage: hdfs balancer"
       + "\n\t[-policy <policy>]\tthe balancing policy: "
       + BalancingPolicy.Node.INSTANCE.getName() + " or "
       + BalancingPolicy.Pool.INSTANCE.getName()
       + "\n\t[-threshold <threshold>]\tPercentage of disk capacity"
-      + "\n\t[-exclude [-f <hosts-file> | comma-sperated list of hosts]]"
+      + "\n\t[-exclude [-f <hosts-file> | comma-separated list of hosts]]"
       + "\tExcludes the specified datanodes."
-      + "\n\t[-include [-f <hosts-file> | comma-sperated list of hosts]]"
+      + "\n\t[-include [-f <hosts-file> | comma-separated list of hosts]]"
       + "\tIncludes only the specified datanodes."
       + "\n\t[-idleiterations <idleiterations>]"
       + "\tNumber of consecutive idle iterations (-1 for Infinite) before exit.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2aa9979a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 6a7f34c..0573158 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -245,13 +245,22 @@ Commands useful for administrators of a hadoop cluster.
 
 ### `balancer`
 
-Usage: `hdfs balancer [-threshold <threshold>] [-policy <policy>] [-idleiterations <idleiterations>]`
+Usage:
+
+    hdfs balancer
+          [-threshold <threshold>]
+          [-policy <policy>]
+          [-exclude [-f <hosts-file> | <comma-separated list of hosts>]]
+          [-include [-f <hosts-file> | <comma-separated list of hosts>]]
+          [-idleiterations <idleiterations>]
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
 | `-policy` *policy* | `datanode` (default): Cluster is balanced if each datanode is balanced.<br/> `blockpool`: Cluster is balanced if each block pool in each datanode is balanced. |
 | `-threshold` *threshold* | Percentage of disk capacity. This overwrites the default threshold. |
 | `-idleiterations` *iterations* | Maximum number of idle iterations before exit. This overwrites 

[06/52] [abbrv] hadoop git commit: HDFS-7804. correct the haadmin command usage in #HDFSHighAvailabilityWithQJM.html (Brahma Reddy Battula via umamahesh)

2015-02-23 Thread zhz
HDFS-7804. correct the haadmin command usage in 
#HDFSHighAvailabilityWithQJM.html (Brahma Reddy Battula via umamahesh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ecea5ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ecea5ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ecea5ab

Branch: refs/heads/HDFS-7285
Commit: 2ecea5ab741f62e8fd0449251f2ea4a5759f4e77
Parents: 3f56a4c
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Wed Feb 18 19:24:45 2015 +0530
Committer: Uma Maheswara Rao G umamah...@apache.org
Committed: Wed Feb 18 19:24:45 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ecea5ab/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 308b61f..ec1c837 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -650,6 +650,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7780. Update use of Iterator to Iterable in DataXceiverServer and
 SnapshotDiffInfo. (Ray Chiang via aajisaka)
 
+HDFS-7804. correct the haadmin command usage in 
#HDFSHighAvailabilityWithQJM.html
+(Brahma Reddy Battula via umamahesh)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ecea5ab/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
index a285fde..63813d9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
@@ -382,7 +382,7 @@ You can visit each of the NameNodes' web pages separately 
by browsing to their c
 
 Now that your HA NameNodes are configured and started, you will have access to 
some additional commands to administer your HA HDFS cluster. Specifically, you 
should familiarize yourself with all of the subcommands of the *hdfs haadmin* 
command. Running this command without any additional arguments will display the 
following usage information:
 
-    Usage: DFSHAAdmin [-ns <nameserviceId>]
+    Usage: haadmin
         [-transitionToActive <serviceId>]
         [-transitionToStandby <serviceId>]
         [-failover [--forcefence] [--forceactive] <serviceId> <serviceId>]



[13/52] [abbrv] hadoop git commit: HADOOP-11595. Add default implementation for AbstractFileSystem#truncate. (yliu)

2015-02-23 Thread zhz
HADOOP-11595. Add default implementation for AbstractFileSystem#truncate. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64a83756
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64a83756
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64a83756

Branch: refs/heads/HDFS-7285
Commit: 64a83756350d9d0f07b72c84f2719e82cf78ee49
Parents: a19820f
Author: yliu y...@apache.org
Authored: Thu Feb 19 08:26:42 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Feb 19 08:26:42 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../main/java/org/apache/hadoop/fs/AbstractFileSystem.java| 7 +--
 .../src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java  | 6 --
 3 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64a83756/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e6d560a..c01e3d6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -970,6 +970,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11545. ArrayIndexOutOfBoundsException is thrown with hadoop
 credential list -provider. (Brahma Reddy Battula via aajisaka)
 
+HADOOP-11595. Add default implementation for AbstractFileSystem#truncate.
+(yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64a83756/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 975cc3c..959d9d5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -642,9 +642,12 @@ public abstract class AbstractFileSystem {
    * {@link FileContext#truncate(Path, long)} except that Path f must be for
    * this file system.
    */
-  public abstract boolean truncate(Path f, long newLength)
+  public boolean truncate(Path f, long newLength)
       throws AccessControlException, FileNotFoundException,
-      UnresolvedLinkException, IOException;
+      UnresolvedLinkException, IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support truncate");
+  }
 
   /**
    * The specification of this method matches that of

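The change replaces an abstract method with a throwing default, so file systems that cannot shrink files no longer need stub overrides — the TestAfsCheckPath cleanup below deletes exactly such a stub. A standalone sketch of the pattern, with made-up class names:

    abstract class BaseFs {
      // Mirrors the new AbstractFileSystem behavior: truncate is opt-in,
      // and non-supporting subclasses inherit a throwing default.
      public boolean truncate(String path, long newLength) {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support truncate");
      }
    }

    class ReadOnlyFs extends BaseFs { }  // no override needed anymore

    public class TruncateDefaultDemo {
      public static void main(String[] args) {
        try {
          new ReadOnlyFs().truncate("/f", 0L);
        } catch (UnsupportedOperationException e) {
          System.out.println(e.getMessage());  // ReadOnlyFs doesn't support truncate
        }
      }
    }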
http://git-wip-us.apache.org/repos/asf/hadoop/blob/64a83756/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
index 6b9378d..3bd14f1 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
@@ -141,12 +141,6 @@ public class TestAfsCheckPath {
 }
 
 @Override
-public boolean truncate(Path f, long newLength) throws IOException {
-  // deliberately empty
-  return false;
-}
-
-@Override
 public void renameInternal(Path src, Path dst) throws IOException {
   // deliberately empty
 }



[40/50] [abbrv] hadoop git commit: Revert HADOOP-11589. NetUtils.createSocketAddr should trim the input URI. Contributed by Rakesh R.

2015-02-23 Thread zhz
Revert HADOOP-11589. NetUtils.createSocketAddr should trim the input URI. 
Contributed by Rakesh R.

This reverts commit 965ce9e24ed051c6705defc0f85dcfaa4fc9a3ef.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86a63b45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86a63b45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86a63b45

Branch: refs/heads/HDFS-EC
Commit: 86a63b45d759e38a5ba979ca8ed801acb2bcfa7c
Parents: 00cfb25
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Sun Feb 15 00:28:59 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:50 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 ---
 .../src/main/java/org/apache/hadoop/net/NetUtils.java |  1 -
 .../java/org/apache/hadoop/conf/TestConfiguration.java| 10 --
 3 files changed, 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86a63b45/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 522ec47..99320cb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -602,9 +602,6 @@ Release 2.7.0 - UNRELEASED
 HADOOP-9869. Configuration.getSocketAddr()/getEnum() should use
 getTrimmed(). (Tsuyoshi Ozawa via aajisaka)
 
-HADOOP-11589. NetUtils.createSocketAddr should trim the input URI.
-(Rakesh R via ozawa)
-
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86a63b45/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index e475149..ef1092b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -189,7 +189,6 @@ public class NetUtils {
       throw new IllegalArgumentException("Target address cannot be null." +
           helpText);
     }
-    target = target.trim();
     boolean hasScheme = target.contains("://");
     URI uri = null;
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86a63b45/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index b7c76eb..a367553 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -1480,16 +1480,6 @@ public class TestConfiguration extends TestCase {
     // it's expected behaviour.
   }
 
-  public void testTrimCreateSocketAddress() {
-    Configuration conf = new Configuration();
-    NetUtils.addStaticResolution("host", "127.0.0.1");
-    final String defaultAddr = "host:1  ";
-
-    InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);
-    conf.setSocketAddr("myAddress", addr);
-    assertEquals(defaultAddr.trim(), NetUtils.getHostPortString(addr));
-  }
-
   public static void main(String[] argv) throws Exception {
     junit.textui.TestRunner.main(new String[]{
       TestConfiguration.class.getName()

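The practical effect of the revert: with the trim() gone, createSocketAddr is again strict about surrounding whitespace, as the deleted test case shows. A reduced sketch of the distinction (hypothetical parse helper, not the NetUtils implementation):

    public class TrimDemo {
      // host:port parsing reduced to the essentials, with no implicit trimming.
      static int parsePort(String target) {
        return Integer.parseInt(target.substring(target.lastIndexOf(':') + 1));
      }

      public static void main(String[] args) {
        System.out.println(parsePort("host:1"));       // 1
        try {
          parsePort("host:1  ");                       // untrimmed input fails again
        } catch (NumberFormatException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }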


[37/52] [abbrv] hadoop git commit: HDFS-7806. Refactor: move StorageType from hadoop-hdfs to hadoop-common. (Contributed by Xiaoyu Yao)

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b465b4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 6ba778f..7d0edb2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -71,7 +72,6 @@ import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
-import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.ipc.GenericRefreshProtocol;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b465b4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index c7fd2f8..5b391c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -30,15 +30,24 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CacheFlag;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.*;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b465b4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 33bd4e5..5297ba2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.ServiceFailedException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b465b4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
index d5225a4..ca2d8d6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
+++ 

[43/52] [abbrv] hadoop git commit: HDFS-7339. Allocating and persisting block groups in NameNode. Contributed by Zhe Zhang

2015-02-23 Thread zhz
HDFS-7339. Allocating and persisting block groups in NameNode. Contributed by 
Zhe Zhang

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d6fffc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d6fffc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d6fffc2

Branch: refs/heads/HDFS-7285
Commit: 6d6fffc229adc487f6f3f70fb70bff0ad71b5f60
Parents: 9815e23
Author: Zhe Zhang z...@apache.org
Authored: Fri Jan 30 16:16:26 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 23 11:19:31 2015 -0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +
 .../hadoop/hdfs/protocol/HdfsConstants.java |  4 +
 .../server/blockmanagement/BlockIdManager.java  |  8 +-
 .../SequentialBlockGroupIdGenerator.java| 82 +++
 .../SequentialBlockIdGenerator.java |  6 +-
 .../hdfs/server/namenode/FSDirectory.java   |  8 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 34 +---
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 11 +++
 .../hdfs/server/namenode/TestAddBlockgroup.java | 84 
 9 files changed, 223 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d6fffc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 975f023..9e9cd40 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -219,6 +219,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;
   public static final String  DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";
   public static final int     DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
+  public static final String  DFS_NAMENODE_STRIPE_MIN_KEY = "dfs.namenode.stripe.min";
+  public static final int     DFS_NAMENODE_STRIPE_MIN_DEFAULT = 1;
   public static final String  DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY = "dfs.namenode.replication.pending.timeout-sec";
   public static final int     DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY = "dfs.namenode.replication.max-streams";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d6fffc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 6945074..e3e3f37 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -181,4 +181,8 @@ public class HdfsConstants {
   public static final byte WARM_STORAGE_POLICY_ID = 5;
   public static final byte EC_STORAGE_POLICY_ID = 4;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
+
+  public static final byte NUM_DATA_BLOCKS = 3;
+  public static final byte NUM_PARITY_BLOCKS = 2;
+  public static final byte MAX_BLOCKS_IN_GROUP = 16;
 }

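A worked reading of the new constants: a block group carries NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS = 5 internal blocks, while IDs are reserved in strides of MAX_BLOCKS_IN_GROUP = 16 so the low bits can address group members. The groupId-plus-index layout below is an assumption inferred from these constants, not quoted from SequentialBlockGroupIdGenerator:

    public class BlockGroupIdSketch {
      static final int NUM_DATA_BLOCKS = 3;
      static final int NUM_PARITY_BLOCKS = 2;
      static final int MAX_BLOCKS_IN_GROUP = 16;

      public static void main(String[] args) {
        int groupSize = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;  // 5 blocks per group
        long groupId = 2 * MAX_BLOCKS_IN_GROUP;               // assumed: low 4 bits kept zero
        for (int i = 0; i < groupSize; i++) {
          // Assumed member addressing: each internal block is groupId + index.
          System.out.println("internal block id: " + (groupId + i));
        }
      }
    }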
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d6fffc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 1c69203..c8b9d20 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -53,10 +53,12 @@ public 

[52/52] [abbrv] hadoop git commit: HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng

2015-02-23 Thread zhz
HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6d064b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6d064b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6d064b3

Branch: refs/heads/HDFS-7285
Commit: e6d064b3b8c81184a1a8eb99fcf1ef00bd1de2d7
Parents: 9d1ec74
Author: drankye dran...@gmail.com
Authored: Thu Feb 12 21:12:44 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 23 11:22:50 2015 -0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   4 +
 .../io/erasurecode/rawcoder/JRSRawDecoder.java  |  69 +++
 .../io/erasurecode/rawcoder/JRSRawEncoder.java  |  78 +++
 .../erasurecode/rawcoder/RawErasureCoder.java   |   2 +-
 .../erasurecode/rawcoder/util/GaloisField.java  | 497 +++
 .../io/erasurecode/rawcoder/util/RSUtil.java|  22 +
 .../hadoop/io/erasurecode/TestCoderBase.java|  28 +-
 .../erasurecode/rawcoder/TestJRSRawCoder.java   |  93 
 .../erasurecode/rawcoder/TestRawCoderBase.java  |   5 +-
 .../erasurecode/rawcoder/TestXorRawCoder.java   |   1 -
 10 files changed, 786 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6d064b3/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 9728f97..7bbacf7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -8,3 +8,7 @@
 
 HADOOP-11541. Raw XOR coder
 ( Kai Zheng )
+
+HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng
+( Kai Zheng )
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6d064b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
new file mode 100644
index 000..dbb689e
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.io.erasurecode.rawcoder.util.RSUtil;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A raw erasure decoder in RS code scheme in pure Java in case native one
+ * isn't available in some environment. Please always use native 
implementations
+ * when possible.
+ */
+public class JRSRawDecoder extends AbstractRawErasureDecoder {
+  // To describe and calculate the needed Vandermonde matrix
+  private int[] errSignature;
+  private int[] primitivePower;
+
+  @Override
+  public void initialize(int numDataUnits, int numParityUnits, int chunkSize) {
+    super.initialize(numDataUnits, numParityUnits, chunkSize);
+    assert (getNumDataUnits() + getNumParityUnits() < RSUtil.GF.getFieldSize());
+
+    this.errSignature = new int[getNumParityUnits()];
+    this.primitivePower = RSUtil.getPrimitivePower(getNumDataUnits(),
+        getNumParityUnits());
+  }
+
+  @Override
+  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
+      ByteBuffer[] outputs) {
+    for (int i = 0; i < erasedIndexes.length; i++) {
+      errSignature[i] = primitivePower[erasedIndexes[i]];
+      RSUtil.GF.substitute(inputs, outputs[i], primitivePower[i]);
+    }
+
+    int dataLen = inputs[0].remaining();
+    RSUtil.GF.solveVandermondeSystem(errSignature, outputs,
+        erasedIndexes.length, dataLen);
+  }
+
+  @Override
+  protected void 

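Background for the Vandermonde solve above: all Reed-Solomon arithmetic runs in a Galois field, typically GF(2^8). A minimal textbook multiply with generating polynomial x^8+x^4+x^3+x^2+1 (0x11d) — a generic sketch, not the GaloisField class this patch adds:

    public class Gf256MulDemo {
      // Carry-less multiplication in GF(2^8), reduced modulo 0x11d.
      static int gfMul(int a, int b) {
        int product = 0;
        for (int bit = 0; bit < 8; bit++) {
          if ((b & 1) != 0) {
            product ^= a;
          }
          boolean overflow = (a & 0x80) != 0;
          a = (a << 1) & 0xff;
          if (overflow) {
            a ^= 0x1d;  // x^8 == x^4 + x^3 + x^2 + 1 modulo the generator
          }
          b >>= 1;
        }
        return product;
      }

      public static void main(String[] args) {
        System.out.println(gfMul(0x57, 1) == 0x57);  // multiplying by 1 is the identity
        System.out.println(gfMul(0x02, 0x80));       // x * x^7 wraps to 0x1d
      }
    }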
[42/52] [abbrv] hadoop git commit: HDFS-7347. Configurable erasure coding policy for individual files and directories ( Contributed by Zhe Zhang )

2015-02-23 Thread zhz
HDFS-7347. Configurable erasure coding policy for individual files and 
directories ( Contributed by Zhe Zhang )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9815e236
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9815e236
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9815e236

Branch: refs/heads/HDFS-7285
Commit: 9815e236202848b0dd76065b7267019567ee4a78
Parents: fe7a302
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Nov 6 10:03:26 2014 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 23 11:18:12 2015 -0800

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  4 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |  2 +
 .../BlockStoragePolicySuite.java|  5 ++
 .../hadoop/hdfs/TestBlockStoragePolicy.java | 12 +++-
 .../TestBlockInitialEncoding.java   | 75 
 5 files changed, 95 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9815e236/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
new file mode 100644
index 000..2ef8527
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -0,0 +1,4 @@
+  BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS
+
+HDFS-7347. Configurable erasure coding policy for individual files and
+directories ( Zhe Zhang via vinayakumarb )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9815e236/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 1769794..6945074 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -171,6 +171,7 @@ public class HdfsConstants {
   public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
   public static final String HOT_STORAGE_POLICY_NAME = "HOT";
   public static final String WARM_STORAGE_POLICY_NAME = "WARM";
+  public static final String EC_STORAGE_POLICY_NAME = "EC";
   public static final String COLD_STORAGE_POLICY_NAME = "COLD";
 
   public static final byte MEMORY_STORAGE_POLICY_ID = 15;
@@ -178,5 +179,6 @@ public class HdfsConstants {
   public static final byte ONESSD_STORAGE_POLICY_ID = 10;
   public static final byte HOT_STORAGE_POLICY_ID = 7;
   public static final byte WARM_STORAGE_POLICY_ID = 5;
+  public static final byte EC_STORAGE_POLICY_ID = 4;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9815e236/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index 9d8bdb5..2d841ce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -78,6 +78,11 @@ public class BlockStoragePolicySuite {
 new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
 new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
 new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
+final byte ecId = HdfsConstants.EC_STORAGE_POLICY_ID;
+policies[ecId] = new BlockStoragePolicy(ecId,
+HdfsConstants.EC_STORAGE_POLICY_NAME,
+new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
+new StorageType[]{StorageType.ARCHIVE});
 final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
 policies[coldId] = new BlockStoragePolicy(coldId,
 HdfsConstants.COLD_STORAGE_POLICY_NAME,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9815e236/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
--
diff --git 

[18/52] [abbrv] hadoop git commit: HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)

2015-02-23 Thread zhz
HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/946456c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/946456c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/946456c6

Branch: refs/heads/HDFS-7285
Commit: 946456c6d88780abe0251b098dd771e9e1e93ab3
Parents: 18fb421
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Thu Feb 19 12:46:46 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Thu Feb 19 13:06:53 2015 +0900

--
 .../classification/tools/StabilityOptions.java  |  5 +--
 .../AltKerberosAuthenticationHandler.java   |  6 ++--
 .../authentication/util/TestKerberosUtil.java   | 14 
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../org/apache/hadoop/conf/Configuration.java   |  3 +-
 .../org/apache/hadoop/crypto/CipherSuite.java   |  3 +-
 .../hadoop/crypto/key/JavaKeyStoreProvider.java |  3 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  7 ++--
 .../apache/hadoop/fs/permission/AclEntry.java   |  6 ++--
 .../org/apache/hadoop/fs/shell/find/Name.java   |  5 +--
 .../io/compress/CompressionCodecFactory.java|  6 ++--
 .../hadoop/metrics2/impl/MetricsConfig.java |  7 ++--
 .../hadoop/metrics2/impl/MetricsSystemImpl.java |  2 +-
 .../apache/hadoop/security/SecurityUtil.java|  5 +--
 .../hadoop/security/WhitelistBasedResolver.java |  4 ++-
 .../security/ssl/FileBasedKeyStoresFactory.java |  4 ++-
 .../apache/hadoop/security/ssl/SSLFactory.java  |  3 +-
 .../security/ssl/SSLHostnameVerifier.java   | 10 +++---
 .../DelegationTokenAuthenticationHandler.java   |  3 +-
 .../web/DelegationTokenAuthenticator.java   |  5 +--
 .../org/apache/hadoop/util/StringUtils.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestIPC.java |  3 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java |  3 +-
 .../hadoop/security/TestSecurityUtil.java   |  6 ++--
 .../security/TestUserGroupInformation.java  |  5 +--
 .../hadoop/test/TimedOutTestsListener.java  |  4 ++-
 .../org/apache/hadoop/util/TestWinUtils.java|  7 ++--
 .../java/org/apache/hadoop/nfs/NfsExports.java  |  5 +--
 .../server/CheckUploadContentTypeFilter.java|  4 ++-
 .../hadoop/fs/http/server/FSOperations.java |  4 ++-
 .../http/server/HttpFSParametersProvider.java   |  4 ++-
 .../org/apache/hadoop/lib/server/Server.java|  3 +-
 .../service/hadoop/FileSystemAccessService.java |  5 +--
 .../org/apache/hadoop/lib/wsrs/EnumParam.java   |  3 +-
 .../apache/hadoop/lib/wsrs/EnumSetParam.java|  3 +-
 .../hadoop/lib/wsrs/ParametersProvider.java |  3 +-
 .../org/apache/hadoop/hdfs/StorageType.java |  3 +-
 .../org/apache/hadoop/hdfs/XAttrHelper.java | 21 
 .../hadoop/hdfs/protocol/HdfsConstants.java |  3 +-
 .../BlockStoragePolicySuite.java|  4 ++-
 .../hdfs/server/common/HdfsServerConstants.java |  5 +--
 .../hdfs/server/datanode/StorageLocation.java   |  4 ++-
 .../hdfs/server/namenode/FSEditLogOp.java   |  3 +-
 .../namenode/QuotaByStorageTypeEntry.java   |  3 +-
 .../hdfs/server/namenode/SecondaryNameNode.java |  2 +-
 .../org/apache/hadoop/hdfs/tools/GetConf.java   | 17 +-
 .../OfflineEditsVisitorFactory.java |  7 ++--
 .../offlineImageViewer/FSImageHandler.java  |  3 +-
 .../org/apache/hadoop/hdfs/web/AuthFilter.java  |  3 +-
 .../org/apache/hadoop/hdfs/web/ParamFilter.java |  3 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  5 +--
 .../hadoop/hdfs/web/resources/EnumParam.java|  3 +-
 .../hadoop/hdfs/web/resources/EnumSetParam.java |  4 ++-
 .../namenode/snapshot/TestSnapshotManager.java  |  5 +--
 .../jobhistory/JobHistoryEventHandler.java  |  4 ++-
 .../mapreduce/v2/app/webapp/AppController.java  |  2 +-
 .../apache/hadoop/mapreduce/TypeConverter.java  |  3 +-
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  5 +--
 .../hadoop/mapreduce/TestTypeConverter.java |  4 ++-
 .../java/org/apache/hadoop/mapred/Task.java |  3 +-
 .../counters/FileSystemCounterGroup.java|  2 +-
 .../mapreduce/filecache/DistributedCache.java   |  4 +--
 .../hadoop/mapreduce/lib/db/DBInputFormat.java  |  4 ++-
 .../org/apache/hadoop/mapreduce/tools/CLI.java  |  8 +++--
 .../java/org/apache/hadoop/fs/TestDFSIO.java| 20 ++--
 .../org/apache/hadoop/fs/TestFileSystem.java|  6 +++-
 .../org/apache/hadoop/fs/slive/Constants.java   |  6 ++--
 .../apache/hadoop/fs/slive/OperationData.java   |  4 ++-
 .../apache/hadoop/fs/slive/OperationOutput.java |  4 ++-
 .../org/apache/hadoop/fs/slive/SliveTest.java   |  3 +-
 .../java/org/apache/hadoop/io/FileBench.java| 17 ++
 .../org/apache/hadoop/mapred/TestMapRed.java|  3 +-
 .../apache/hadoop/examples/DBCountPageView.java |  3 +-
 .../plugin/versioninfo/VersionInfoMojo.java |  4 ++-
 
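Background on why this sweep pins the locale: default-locale case mapping is not ASCII-stable — under tr-TR, "I" lowercases to the dotless ı, silently breaking comparisons against ASCII constants. A standalone illustration:

    import java.util.Locale;

    public class LocaleCaseDemo {
      public static void main(String[] args) {
        Locale.setDefault(new Locale("tr", "TR"));
        // Turkish locale: 'I' lowercases to dotless '\u0131', so the naive
        // conversion no longer matches the ASCII string "title".
        System.out.println("TITLE".toLowerCase().equals("title"));               // false
        // Pinning Locale.ENGLISH keeps the mapping ASCII-stable.
        System.out.println("TITLE".toLowerCase(Locale.ENGLISH).equals("title")); // true
      }
    }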

[44/50] [abbrv] hadoop git commit: YARN-2749. Fix some testcases from TestLogAggregationService fails in trunk. (Contributed by Xuan Gong)

2015-02-23 Thread zhz
YARN-2749. Fix some testcases from TestLogAggregationService fails in trunk. 
(Contributed by Xuan Gong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f09f8d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f09f8d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f09f8d2

Branch: refs/heads/HDFS-EC
Commit: 7f09f8d228027f9f41cfa5cf4812c1e2789fa027
Parents: bb736e9
Author: Junping Du junping...@apache.org
Authored: Sun Feb 15 06:46:32 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:51 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt  |  3 +++
 .../logaggregation/AppLogAggregatorImpl.java | 15 +++
 2 files changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f09f8d2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e9a7c58..e00e447 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -599,6 +599,9 @@ Release 2.7.0 - UNRELEASED
 YARN-2899. Run TestDockerContainerExecutorWithMocks on Linux only.
 (Ming Ma via cnauroth)
 
+YARN-2749. Fix some testcases from TestLogAggregationService fails in 
trunk. 
+(Xuan Gong via junping_du)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f09f8d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 20887b6..8eb00f4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -117,6 +117,8 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   private final int retentionSize;
   private final long rollingMonitorInterval;
   private final NodeId nodeId;
+  // This variable is only for testing
+  private final AtomicBoolean waiting = new AtomicBoolean(false);
 
   private final Map<ContainerId, ContainerLogAggregator> containerLogAggregators =
       new HashMap<ContainerId, ContainerLogAggregator>();
@@ -391,6 +393,7 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
     while (!this.appFinishing.get() && !this.aborted.get()) {
       synchronized(this) {
         try {
+          waiting.set(true);
           if (this.rollingMonitorInterval > 0) {
             wait(this.rollingMonitorInterval * 1000);
             if (this.appFinishing.get() || this.aborted.get()) {
@@ -507,7 +510,19 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
 
   @Private
   @VisibleForTesting
+  // This is only used for testing.
+  // This will wake the log aggregation thread that is waiting for
+  // rollingMonitorInterval.
+  // To use this method, make sure the log aggregation thread is running
+  // and waiting for rollingMonitorInterval.
   public synchronized void doLogAggregationOutOfBand() {
+    while(!waiting.get()) {
+      try {
+        wait(200);
+      } catch (InterruptedException e) {
+        // Do Nothing
+      }
+    }
     LOG.info("Do OutOfBand log aggregation");
 this.notifyAll();
   }
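
The testing hook above is a classic wait/notify hand-off: the aggregation thread publishes a waiting flag before blocking on the object's monitor, and the test-side caller polls that flag before calling notifyAll(), so the wakeup cannot be delivered before anyone is listening. A minimal, self-contained sketch of the same pattern (class and method names are invented for illustration; this is not Hadoop code):

    import java.util.concurrent.atomic.AtomicBoolean;

    public class WaitNotifyHandoff {
      private final AtomicBoolean waiting = new AtomicBoolean(false);

      // Worker side: publish the flag, then block on this object's monitor.
      public synchronized void awaitSignal() throws InterruptedException {
        waiting.set(true);
        wait();  // releases the monitor while blocked
      }

      // Caller side: poll until the worker is provably inside wait(), then wake it.
      public synchronized void signalOnceWaiting() throws InterruptedException {
        while (!waiting.get()) {
          wait(200);  // re-check every 200 ms, as the patch does
        }
        notifyAll();
      }

      public static void main(String[] args) throws Exception {
        WaitNotifyHandoff h = new WaitNotifyHandoff();
        Thread worker = new Thread(() -> {
          try {
            h.awaitSignal();
            System.out.println("worker woke up");
          } catch (InterruptedException ignored) {
          }
        });
        worker.start();
        h.signalOnceWaiting();
        worker.join();
      }
    }

The wait(200) poll matters: calling notifyAll() before the worker has entered wait() would otherwise be a lost wakeup, which is exactly the race the patch closes.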



[49/52] [abbrv] hadoop git commit: Added the missed entry for commit of HADOOP-11541

2015-02-23 Thread zhz
Added the missed entry for commit of HADOOP-11541


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d3b4623
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d3b4623
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d3b4623

Branch: refs/heads/HDFS-7285
Commit: 0d3b4623aa1c61da7b20c438bd214f859b4a3906
Parents: 14248ad
Author: drankye dran...@gmail.com
Authored: Mon Feb 9 22:04:08 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 23 11:20:37 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d3b4623/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 2124800..9728f97 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -4,4 +4,7 @@
 (Kai Zheng via umamahesh)
 
 HADOOP-11534. Minor improvements for raw erasure coders
-( Kai Zheng via vinayakumarb )
\ No newline at end of file
+( Kai Zheng via vinayakumarb )
+
+HADOOP-11541. Raw XOR coder
+( Kai Zheng )



[07/52] [abbrv] hadoop git commit: HADOOP-11545. ArrayIndexOutOfBoundsException is thrown with hadoop credential list -provider. Contributed by Brahma Reddy Battula.

2015-02-23 Thread zhz
HADOOP-11545. ArrayIndexOutOfBoundsException is thrown with hadoop credential 
list -provider. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17146099
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17146099
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17146099

Branch: refs/heads/HDFS-7285
Commit: 17146099197000d85b3aedc84a672111f2c7908a
Parents: 2ecea5a
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Feb 18 11:17:10 2015 -0800
Committer: Akira Ajisaka aajis...@apache.org
Committed: Wed Feb 18 11:17:10 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../hadoop/security/alias/CredentialShell.java   | 19 ---
 .../hadoop/security/alias/TestCredShell.java | 15 +++
 3 files changed, 34 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17146099/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index f248555..92bd48d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -962,6 +962,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11599. Client#getTimeout should use IPC_CLIENT_PING_DEFAULT when 
 IPC_CLIENT_PING_KEY is not configured. (zhihai xu via ozawa)
 
+HADOOP-11545. ArrayIndexOutOfBoundsException is thrown with hadoop
+credential list -provider. (Brahma Reddy Battula via aajisaka)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17146099/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
index f397403..e8a721f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
@@ -97,6 +97,10 @@ public class CredentialShell extends Configured implements 
Tool {
 
     for (int i = 0; i < args.length; i++) { // parse command line
       if (args[i].equals("create")) {
+        if (i == args.length - 1) {
+          printCredShellUsage();
+          return 1;
+        }
         String alias = args[++i];
         command = new CreateCommand(alias);
         if (alias.equals("-help")) {
@@ -104,6 +108,10 @@ public class CredentialShell extends Configured implements Tool {
           return 0;
         }
       } else if (args[i].equals("delete")) {
+        if (i == args.length - 1) {
+          printCredShellUsage();
+          return 1;
+        }
         String alias = args[++i];
         command = new DeleteCommand(alias);
         if (alias.equals("-help")) {
@@ -113,6 +121,10 @@ public class CredentialShell extends Configured implements Tool {
       } else if (args[i].equals("list")) {
         command = new ListCommand();
       } else if (args[i].equals("-provider")) {
+        if (i == args.length - 1) {
+          printCredShellUsage();
+          return 1;
+        }
         userSuppliedProvider = true;
         getConf().set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
             args[++i]);
@@ -195,7 +207,7 @@ public class CredentialShell extends Configured implements 
Tool {
   }
 
   private class ListCommand extends Command {
-    public static final String USAGE = "list [-provider] [-help]";
+    public static final String USAGE = "list [-provider provider-path]";
     public static final String DESC =
         "The list subcommand displays the aliases contained within \n" +
         "a particular provider - as configured in core-site.xml or " +
@@ -237,7 +249,7 @@ public class CredentialShell extends Configured implements Tool {
 
   private class DeleteCommand extends Command {
     public static final String USAGE =
-        "delete alias [-provider] [-f] [-help]";
+        "delete alias [-f] [-provider provider-path]";
     public static final String DESC =
         "The delete subcommand deletes the credential\n" +
         "specified as the alias argument from within the provider\n" +
@@ -308,7 +320,8 @@ public class CredentialShell extends Configured implements 
Tool {
   }
 
   private class CreateCommand extends Command {
-    public static final String USAGE = "create alias [-provider] [-help]";
+public static final 
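
The guard added before each args[++i] generalizes to any hand-rolled CLI parser: a flag that consumes a value must verify that a value actually follows, otherwise the increment walks off the end of the array. A standalone sketch of the pattern (the usage() helper and flag name are invented for illustration, not the CredentialShell code):

    public class ArgParserExample {
      public static int parse(String[] args) {
        for (int i = 0; i < args.length; i++) {
          if (args[i].equals("-provider")) {
            if (i == args.length - 1) {  // flag given but no value follows
              usage();
              return 1;
            }
            String provider = args[++i];  // safe: a value is guaranteed
            System.out.println("provider = " + provider);
          }
        }
        return 0;
      }

      private static void usage() {
        System.err.println("usage: tool [-provider <path>]");
      }

      public static void main(String[] args) {
        System.exit(parse(args));
      }
    }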

[09/52] [abbrv] hadoop git commit: YARN-3132. RMNodeLabelsManager should remove node from node-to-label mapping when node becomes deactivated. Contributed by Wangda Tan

2015-02-23 Thread zhz
YARN-3132. RMNodeLabelsManager should remove node from node-to-label mapping 
when node becomes deactivated. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5da5566
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5da5566
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5da5566

Branch: refs/heads/HDFS-7285
Commit: f5da5566d9c392a5df71a2dce4c2d0d50eea51ee
Parents: 4981d08
Author: Jian He jia...@apache.org
Authored: Wed Feb 18 11:51:51 2015 -0800
Committer: Jian He jia...@apache.org
Committed: Wed Feb 18 11:51:51 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../nodelabels/CommonNodeLabelsManager.java |  3 +-
 .../nodelabels/RMNodeLabelsManager.java | 20 ++-
 .../nodelabels/TestRMNodeLabelsManager.java | 63 ++--
 4 files changed, 78 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da5566/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cbba046..884b506 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -611,6 +611,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3207. Secondary filter matches entites which do not have the key being
 filtered for. (Zhijie Shen via xgong)
 
+YARN-3132. RMNodeLabelsManager should remove node from node-to-label 
mapping
+when node becomes deactivated. (Wangda Tan via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da5566/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index cb6f1f3..e2da664 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent;
 import org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType;
@@ -496,7 +495,7 @@ public class CommonNodeLabelsManager extends 
AbstractService {
 }
   }
 
-  private void removeNodeFromLabels(NodeId node, Set<String> labels) {
+  protected void removeNodeFromLabels(NodeId node, Set<String> labels) {
 for(String l : labels) {
   labelCollections.get(l).removeNodeId(node);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da5566/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
index 9942d80..e5abdc9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
@@ -228,9 +228,23 @@ public class RMNodeLabelsManager extends 
CommonNodeLabelsManager {
   Map<String, Host> before = cloneNodeMap(ImmutableSet.of(nodeId));
   Node nm = getNMInNodeSet(nodeId);
   if (null != nm) {
-// set nm is not running, and its resource = 0
-nm.running = false;
-nm.resource = Resource.newInstance(0, 0);
+if (null == nm.labels) {
+  // When node 
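
The structure being maintained here is a label-to-nodes index that must shed a node's entries when the node deactivates. A minimal sketch of that bookkeeping, with invented names and plain strings standing in for the YARN types:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class LabelIndexExample {
      // label -> nodes carrying that label
      private final Map<String, Set<String>> nodesByLabel = new HashMap<>();

      void addNodeToLabels(String node, Set<String> labels) {
        for (String l : labels) {
          nodesByLabel.computeIfAbsent(l, k -> new HashSet<>()).add(node);
        }
      }

      // Counterpart used on deactivation, mirroring removeNodeFromLabels above.
      void removeNodeFromLabels(String node, Set<String> labels) {
        for (String l : labels) {
          Set<String> nodes = nodesByLabel.get(l);
          if (nodes != null) {
            nodes.remove(node);
          }
        }
      }

      public static void main(String[] args) {
        LabelIndexExample idx = new LabelIndexExample();
        Set<String> labels = new HashSet<>();
        labels.add("gpu");
        idx.addNodeToLabels("host1:8041", labels);
        idx.removeNodeFromLabels("host1:8041", labels);  // node deactivated
        System.out.println(idx.nodesByLabel);            // {gpu=[]}
      }
    }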

[12/52] [abbrv] hadoop git commit: YARN-1514. Utility to benchmark ZKRMStateStore#loadState for RM HA. Contributed by Tsuyoshi OZAWA

2015-02-23 Thread zhz
YARN-1514. Utility to benchmark ZKRMStateStore#loadState for RM HA. Contributed 
by Tsuyoshi OZAWA


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c033763
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c033763
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c033763

Branch: refs/heads/HDFS-7285
Commit: 1c03376300a46722d4147f5b8f37242f68dba0a2
Parents: 9a3e292
Author: Jian He jia...@apache.org
Authored: Wed Feb 18 16:06:55 2015 -0800
Committer: Jian He jia...@apache.org
Committed: Wed Feb 18 16:06:55 2015 -0800

--
 hadoop-project/pom.xml  |   1 -
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop-yarn-server-resourcemanager/pom.xml  |   8 +-
 .../org/apache/hadoop/test/YarnTestDriver.java  |  60 
 .../recovery/RMStateStoreTestBase.java  |  19 +-
 .../recovery/TestZKRMStateStorePerf.java| 277 +++
 6 files changed, 359 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c033763/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 0c7cfc8..2c0f03a 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -828,7 +828,6 @@
         <artifactId>zookeeper</artifactId>
         <version>${zookeeper.version}</version>
         <type>test-jar</type>
-        <scope>test</scope>
         <exclusions>
           <exclusion>
             <groupId>org.jboss.netty</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c033763/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 884b506..91ce11f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -301,6 +301,9 @@ Release 2.7.0 - UNRELEASED
 YARN-1299. Improve a log message in AppSchedulingInfo by adding 
application 
 id. (Ashutosh Jindal and Devaraj K via ozawa)
 
+YARN-1514. Utility to benchmark ZKRMStateStore#loadState for RM HA.
+(Tsuyoshi OZAWA via jianhe)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c033763/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 9bcc7c8..ff429cc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -186,7 +186,6 @@
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
       <type>test-jar</type>
-      <scope>test</scope>
     </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
@@ -245,6 +244,13 @@
               <goal>test-jar</goal>
             </goals>
             <phase>test-compile</phase>
+            <configuration>
+              <archive>
+                <manifest>
+                  <mainClass>org.apache.hadoop.test.YarnTestDriver</mainClass>
+                </manifest>
+              </archive>
+            </configuration>
           </execution>
         </executions>
       </plugin>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c033763/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/test/YarnTestDriver.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/test/YarnTestDriver.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/test/YarnTestDriver.java
new file mode 100644
index 000..8874ed8
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/test/YarnTestDriver.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a 
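
YarnTestDriver follows the standard Hadoop ProgramDriver pattern, which is what the mainClass manifest entry above makes launchable from the test jar. A hedged sketch of that pattern (only org.apache.hadoop.util.ProgramDriver is assumed; the registered class is a stand-in):

    import org.apache.hadoop.util.ProgramDriver;

    public class ExampleTestDriver {
      // Stand-in for a benchmark such as TestZKRMStateStorePerf.
      public static class DummyPerfTest {
        public static void main(String[] args) {
          System.out.println("benchmark would run here");
        }
      }

      public static void main(String[] args) throws Throwable {
        ProgramDriver pgd = new ProgramDriver();
        // Register each benchmark under a short command name, then dispatch
        // on args[0] to that class's main().
        pgd.addClass("dummyperf", DummyPerfTest.class,
            "Illustrative stand-in for the ZKRMStateStore benchmark.");
        pgd.driver(args);
      }
    }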

[01/52] [abbrv] hadoop git commit: HADOOP-11593. Convert site documentation from apt to markdown (stragglers) (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 49204fcf1 -> e6d064b3b (forced update)


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc1f3e/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
--
diff --git 
a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm 
b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
new file mode 100644
index 000..0b64586
--- /dev/null
+++ b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
@@ -0,0 +1,559 @@
+%<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+#set ( $H3 = '###' )
+#set ( $H4 = '####' )
+#set ( $H5 = '#####' )
+
+Hadoop Streaming
+
+
+* [Hadoop Streaming](#Hadoop_Streaming)
+* [Hadoop Streaming](#Hadoop_Streaming)
+* [How Streaming Works](#How_Streaming_Works)
+* [Streaming Command Options](#Streaming_Command_Options)
+* [Specifying a Java Class as the 
Mapper/Reducer](#Specifying_a_Java_Class_as_the_MapperReducer)
+* [Packaging Files With Job 
Submissions](#Packaging_Files_With_Job_Submissions)
+* [Specifying Other Plugins for 
Jobs](#Specifying_Other_Plugins_for_Jobs)
+* [Setting Environment Variables](#Setting_Environment_Variables)
+* [Generic Command Options](#Generic_Command_Options)
+* [Specifying Configuration Variables with the -D 
Option](#Specifying_Configuration_Variables_with_the_-D_Option)
+* [Specifying Directories](#Specifying_Directories)
+* [Specifying Map-Only Jobs](#Specifying_Map-Only_Jobs)
+* [Specifying the Number of 
Reducers](#Specifying_the_Number_of_Reducers)
+* [Customizing How Lines are Split into Key/Value 
Pairs](#Customizing_How_Lines_are_Split_into_KeyValue_Pairs)
+* [Working with Large Files and 
Archives](#Working_with_Large_Files_and_Archives)
+* [Making Files Available to 
Tasks](#Making_Files_Available_to_Tasks)
+* [Making Archives Available to 
Tasks](#Making_Archives_Available_to_Tasks)
+* [More Usage Examples](#More_Usage_Examples)
+* [Hadoop Partitioner Class](#Hadoop_Partitioner_Class)
+* [Hadoop Comparator Class](#Hadoop_Comparator_Class)
+* [Hadoop Aggregate Package](#Hadoop_Aggregate_Package)
+* [Hadoop Field Selection Class](#Hadoop_Field_Selection_Class)
+* [Frequently Asked Questions](#Frequently_Asked_Questions)
+* [How do I use Hadoop Streaming to run an arbitrary set of (semi) 
independent 
tasks?](#How_do_I_use_Hadoop_Streaming_to_run_an_arbitrary_set_of_semi_independent_tasks)
+* [How do I process files, one per 
map?](#How_do_I_process_files_one_per_map)
+* [How many reducers should I use?](#How_many_reducers_should_I_use)
+* [If I set up an alias in my shell script, will that work after 
-mapper?](#If_I_set_up_an_alias_in_my_shell_script_will_that_work_after_-mapper)
+* [Can I use UNIX pipes?](#Can_I_use_UNIX_pipes)
* [What do I do if I get the "No space left on device" error?](#What_do_I_do_if_I_get_the_No_space_left_on_device_error)
+* [How do I specify multiple input 
directories?](#How_do_I_specify_multiple_input_directories)
+* [How do I generate output files with gzip 
format?](#How_do_I_generate_output_files_with_gzip_format)
+* [How do I provide my own input/output format with 
streaming?](#How_do_I_provide_my_own_inputoutput_format_with_streaming)
+* [How do I parse XML documents using 
streaming?](#How_do_I_parse_XML_documents_using_streaming)
+* [How do I update counters in streaming 
applications?](#How_do_I_update_counters_in_streaming_applications)
+* [How do I update status in streaming 
applications?](#How_do_I_update_status_in_streaming_applications)
+* [How do I get the Job variables in a streaming job's 
mapper/reducer?](#How_do_I_get_the_Job_variables_in_a_streaming_jobs_mapperreducer)
+
+Hadoop Streaming
+
+
+Hadoop streaming is a utility that comes with the Hadoop distribution. The 
utility allows you to create and run Map/Reduce jobs with any executable or 
script as the mapper and/or the reducer. For example:
+
+hadoop jar hadoop-streaming-${project.version}.jar \
+  -input myInputDirs \
+  -output myOutputDir \
+  -mapper /bin/cat \
+  -reducer /usr/bin/wc
+

[23/52] [abbrv] hadoop git commit: HDFS-7752. Improve description for dfs.namenode.num.extra.edits.retained and dfs.namenode.num.checkpoints.retained properties on hdfs-default.xml. Contributed by

2015-02-23 Thread zhz
HDFS-7752. Improve description for dfs.namenode.num.extra.edits.retained and 
dfs.namenode.num.checkpoints.retained properties on hdfs-default.xml. 
Contributed by Wellington Chevreuil.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9a17909
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9a17909
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9a17909

Branch: refs/heads/HDFS-7285
Commit: b9a17909ba39898120a096cb6ae90104640690db
Parents: c0d9b93
Author: Harsh J ha...@cloudera.com
Authored: Fri Feb 20 19:20:41 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Fri Feb 20 19:20:41 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  5 +
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  | 15 +++
 2 files changed, 16 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a17909/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 80a086a..5f3cc02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -341,6 +341,11 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-7752. Improve description for
+"dfs.namenode.num.extra.edits.retained"
+and "dfs.namenode.num.checkpoints.retained" properties on
+hdfs-default.xml (Wellington Chevreuil via harsh)
+
 HDFS-7055. Add tracing to DFSInputStream (cmccabe)
 
 HDFS-7186. Document the hadoop trace command. (Masatake Iwasaki via Colin

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a17909/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 9299ea3..85d2273 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -852,9 +852,9 @@
 <property>
   <name>dfs.namenode.num.checkpoints.retained</name>
   <value>2</value>
-  <description>The number of image checkpoint files that will be retained by
+  <description>The number of image checkpoint files (fsimage_*) that will be retained by
   the NameNode and Secondary NameNode in their storage directories. All edit
-  logs necessary to recover an up-to-date namespace from the oldest retained
+  logs (stored on edits_* files) necessary to recover an up-to-date namespace from the oldest retained
   checkpoint will also be retained.
   </description>
 </property>
@@ -863,8 +863,15 @@
   <name>dfs.namenode.num.extra.edits.retained</name>
   <value>1000000</value>
   <description>The number of extra transactions which should be retained
-  beyond what is minimally necessary for a NN restart. This can be useful for
-  audit purposes or for an HA setup where a remote Standby Node may have
+  beyond what is minimally necessary for a NN restart.
+  It does not translate directly to file's age, or the number of files kept,
+  but to the number of transactions (here "edits" means transactions).
+  One edit file may contain several transactions (edits).
+  During checkpoint, NameNode will identify the total number of edits to retain as extra by
+  checking the latest checkpoint transaction value, subtracted by the value of this property.
+  Then, it scans edits files to identify the older ones that don't include the computed range of
+  retained transactions that are to be kept around, and purges them subsequently.
+  The retainment can be useful for audit purposes or for an HA setup where a remote Standby Node may have
   been offline for some time and need to have a longer backlog of retained
   edits in order to start again.
   Typically each edit is on the order of a few hundred bytes, so the default
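
The retention rule in that description boils down to transaction-id arithmetic: everything below (latest checkpoint txid minus the retained count) is purgeable. A small illustration with invented numbers (the real logic lives inside the NameNode, not in this sketch):

    public class EditsRetentionExample {
      public static void main(String[] args) {
        long latestCheckpointTxId = 50_000;      // txid covered by the newest fsimage
        long numExtraEditsRetained = 1_000_000;  // dfs.namenode.num.extra.edits.retained

        // Transactions at or above this id must stay; edits files whose
        // transactions all fall below it are eligible for purging.
        long minTxIdToKeep =
            Math.max(latestCheckpointTxId - numExtraEditsRetained, 0);
        System.out.println("purge edits files ending below txid " + minTxIdToKeep);
      }
    }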



[02/52] [abbrv] hadoop git commit: HADOOP-11593. Convert site documentation from apt to markdown (stragglers) (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc1f3e/hadoop-tools/hadoop-sls/src/site/apt/SchedulerLoadSimulator.apt.vm
--
diff --git a/hadoop-tools/hadoop-sls/src/site/apt/SchedulerLoadSimulator.apt.vm 
b/hadoop-tools/hadoop-sls/src/site/apt/SchedulerLoadSimulator.apt.vm
deleted file mode 100644
index a8b408c..000
--- a/hadoop-tools/hadoop-sls/src/site/apt/SchedulerLoadSimulator.apt.vm
+++ /dev/null
@@ -1,439 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~ http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License.
-
-  ---
-  Yarn Scheduler Load Simulator (SLS)
-  ---
-  ---
-  ${maven.build.timestamp}
-
-Yarn Scheduler Load Simulator (SLS)
-
-%{toc|section=1|fromDepth=0}
-
-* Overview
-
-** Overview
-
-  The Yarn scheduler is a fertile area of interest with different
-  implementations, e.g., Fifo, Capacity and Fair schedulers. Meanwhile, several
-  optimizations are also made to improve scheduler performance for different
-  scenarios and workload. Each scheduler algorithm has its own set of features,
-  and drives scheduling decisions by many factors, such as fairness, capacity
-  guarantee, resource availability, etc. It is very important to evaluate a
-  scheduler algorithm very well before we deploy in a production cluster.
-  Unfortunately, currently it is non-trivial to evaluate a scheduler algorithm.
-  Evaluating in a real cluster is always time and cost consuming, and it is
-  also very hard to find a large-enough cluster. Hence, a simulator which can
-  predict how well a scheduler algorithm for some specific workload would be
-  quite useful.
-
-  The Yarn Scheduler Load Simulator (SLS) is such a tool, which can simulate
-  large-scale Yarn clusters and application loads in a single machine.This
-  simulator would be invaluable in furthering Yarn by providing a tool for
-  researchers and developers to prototype new scheduler features and predict
-  their behavior and performance with reasonable amount of confidence,
-  thereby aiding rapid innovation.
-
-  The simulator will exercise the real Yarn ResourceManager removing the
-  network factor by simulating NodeManagers and ApplicationMasters
-  via handling and dispatching NM/AMs heartbeat events from within
-  the same JVM. To keep tracking of scheduler behavior and performance, a
-  scheduler wrapper will wrap the real scheduler.
-
-  The size of the cluster and the application load can be loaded from
-  configuration files, which are generated from job history files directly by
-  adopting {{{https://hadoop.apache.org/docs/stable/rumen.html}Apache Rumen}}.
-
-  The simulator will produce real time metrics while executing, including:
-
-  * Resource usages for whole cluster and each queue, which can be utilized to
-configure cluster and queue's capacity.
-
-  * The detailed application execution trace (recorded in relation to simulated
-time), which can be analyzed to understand/validate the scheduler behavior
-(individual jobs turn around time, throughput, fairness, capacity 
guarantee,
-etc.).
-
-  * Several key metrics of scheduler algorithm, such as time cost of each
-scheduler operation (allocate, handle, etc.), which can be utilized by 
Hadoop
-developers to find the code spots and scalability limits.
-
-** Goals
-
-  * Exercise the scheduler at scale without a real cluster using real job
-traces.
-
-  * Being able to simulate real workloads.
-
-** Architecture
-
-  The following figure illustrates the implementation architecture of the
-  simulator.
-
-[images/sls_arch.png] The architecture of the simulator
-
-  The simulator takes input of workload traces, and fetches the cluster and
-  applications information. For each NM and AM, the simulator builds a 
simulator
-  to simulate their running. All NM/AM simulators run in a thread pool. The
-  simulator reuses Yarn Resource Manager, and builds a wrapper out of the
-  scheduler. The Scheduler Wrapper can track the scheduler behaviors and
-  generates several logs, which are the outputs of the simulator and can be
-  further analyzed.
-
-** Usecases
-
-  * Engineering
-
-* Verify correctness of scheduler algorithm under load
-
-* Cheap/practical way for finding code hotspots/critical-path.
-
-* Validate the impact of changes and new features.
-
-* Determine what drives the scheduler scalability limits.
-
-  []
-
-  * QA
-
-* Validate scheduler behavior for large clusters and several workload
-

[30/52] [abbrv] hadoop git commit: HADOOP-11607. Reduce log spew in S3AFileSystem. (Lei (Eddy) Xu via stevel)

2015-02-23 Thread zhz
HADOOP-11607. Reduce log spew in S3AFileSystem. (Lei (Eddy) Xu via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa1c437b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa1c437b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa1c437b

Branch: refs/heads/HDFS-7285
Commit: aa1c437b6a806de612f030a68984c606c623f1d9
Parents: 02e7dec
Author: Steve Loughran ste...@apache.org
Authored: Fri Feb 20 20:45:05 2015 +
Committer: Steve Loughran ste...@apache.org
Committed: Fri Feb 20 20:51:24 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  2 ++
 .../main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java | 10 +++---
 2 files changed, 9 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa1c437b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8d3f9f5..763377c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -626,6 +626,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11440. Use test.build.data instead of build.test.dir for testing
 in ClientBaseWithFixes. (Kengo Seki via aajisaka)
 
+HADOOP-11607. Reduce log spew in S3AFileSystem. (Lei (Eddy) Xu via stevel)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa1c437b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 4de5c13..eaa5f2d 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -354,7 +354,9 @@ public class S3AFileSystem extends FileSystem {
   public FSDataInputStream open(Path f, int bufferSize)
   throws IOException {
 
-    LOG.info("Opening '" + f + "' for reading");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Opening '{}' for reading.", f);
+    }
     final FileStatus fileStatus = getFileStatus(f);
     if (fileStatus.isDirectory()) {
       throw new FileNotFoundException("Can't open " + f + " because it is a directory");
@@ -425,7 +427,9 @@ public class S3AFileSystem extends FileSystem {
* @return true if rename is successful
*/
   public boolean rename(Path src, Path dst) throws IOException {
-    LOG.info("Rename path " + src + " to " + dst);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Rename path {} to {}", src, dst);
+    }
 
 String srcKey = pathToKey(src);
 String dstKey = pathToKey(dst);
@@ -441,7 +445,7 @@ public class S3AFileSystem extends FileSystem {
 try {
   srcStatus = getFileStatus(src);
 } catch (FileNotFoundException e) {
-      LOG.info("rename: src not found " + src);
+      LOG.error("rename: src not found {}", src);
   return false;
 }
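
Both devices used throughout this patch, parameterized messages and an isDebugEnabled() guard, are easy to demonstrate in isolation. A minimal SLF4J sketch (the class is invented; only the SLF4J API is assumed):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class QuietLoggingExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(QuietLoggingExample.class);

      public void open(String path) {
        // {} defers string formatting until the message is actually emitted;
        // the guard additionally skips any work done to compute arguments.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Opening '{}' for reading.", path);
        }
      }
    }

Dropping the level from info to debug is what removes the spew; the guard and placeholders keep the disabled path nearly free.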
 



[31/52] [abbrv] hadoop git commit: HADOOP-11604. Prevent ConcurrentModificationException while closing domain sockets during shutdown of DomainSocketWatcher thread. Contributed by Chris Nauroth.

2015-02-23 Thread zhz
HADOOP-11604. Prevent ConcurrentModificationException while closing domain 
sockets during shutdown of DomainSocketWatcher thread. Contributed by Chris 
Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c5ff075
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c5ff075
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c5ff075

Branch: refs/heads/HDFS-7285
Commit: 3c5ff0759c4f4e10c97c6d9036add00edb8be2b5
Parents: aa1c437
Author: cnauroth cnaur...@apache.org
Authored: Fri Feb 20 13:07:16 2015 -0800
Committer: cnauroth cnaur...@apache.org
Committed: Fri Feb 20 13:07:16 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../hadoop/net/unix/DomainSocketWatcher.java| 45 --
 .../net/unix/TestDomainSocketWatcher.java   | 65 ++--
 3 files changed, 105 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c5ff075/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 763377c..b09868a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -978,6 +978,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-9087. Queue size metric for metric sinks isn't actually maintained
 (Akira AJISAKA via jlowe)
 
+HADOOP-11604. Prevent ConcurrentModificationException while closing domain
+sockets during shutdown of DomainSocketWatcher thread. (cnauroth)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c5ff075/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
index 0172f6b..8c617dc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
@@ -246,6 +246,13 @@ public final class DomainSocketWatcher implements 
Closeable {
 this.interruptCheckPeriodMs = interruptCheckPeriodMs;
 notificationSockets = DomainSocket.socketpair();
 watcherThread.setDaemon(true);
+    watcherThread.setUncaughtExceptionHandler(
+        new Thread.UncaughtExceptionHandler() {
+          @Override
+          public void uncaughtException(Thread thread, Throwable t) {
+            LOG.error(thread + " terminating on unexpected exception", t);
+          }
+        });
 watcherThread.start();
   }
 
@@ -372,7 +379,17 @@ public final class DomainSocketWatcher implements 
Closeable {
 }
   }
 
-  private void sendCallback(String caller, TreeMap<Integer, Entry> entries,
+  /**
+   * Send callback and return whether or not the domain socket was closed as a
+   * result of processing.
+   *
+   * @param caller reason for call
+   * @param entries mapping of file descriptor to entry
+   * @param fdSet set of file descriptors
+   * @param fd file descriptor
+   * @return true if the domain socket was closed as a result of processing
+   */
+  private boolean sendCallback(String caller, TreeMap<Integer, Entry> entries,
   FdSet fdSet, int fd) {
 if (LOG.isTraceEnabled()) {
       LOG.trace(this + ": " + caller + " starting sendCallback for fd " + fd);
@@ -401,13 +418,30 @@ public final class DomainSocketWatcher implements 
Closeable {
           "still in the poll(2) loop.");
   }
   IOUtils.cleanup(LOG, sock);
-  entries.remove(fd);
   fdSet.remove(fd);
+  return true;
 } else {
   if (LOG.isTraceEnabled()) {
         LOG.trace(this + ": " + caller + ": sendCallback not " +
             "closing fd " + fd);
   }
+  return false;
+}
+  }
+
+  /**
+   * Send callback, and if the domain socket was closed as a result of
+   * processing, then also remove the entry for the file descriptor.
+   *
+   * @param caller reason for call
+   * @param entries mapping of file descriptor to entry
+   * @param fdSet set of file descriptors
+   * @param fd file descriptor
+   */
+  private void sendCallbackAndRemove(String caller,
+      TreeMap<Integer, Entry> entries, FdSet fdSet, int fd) {
+if (sendCallback(caller, entries, fdSet, fd)) {
+  entries.remove(fd);
 }
   }
 
@@ -427,7 +461,8 @@ public final class DomainSocketWatcher implements Closeable 
{
   lock.lock();
   try {
 

[19/52] [abbrv] hadoop git commit: MAPREDUCE-6228. Add truncate operation to SLive. Contributed by Plamen Jeliazkov.

2015-02-23 Thread zhz
MAPREDUCE-6228. Add truncate operation to SLive. Contributed by Plamen 
Jeliazkov.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a19820f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a19820f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a19820f2

Branch: refs/heads/HDFS-7285
Commit: a19820f2fb2000a789a114f8ed55cb7e071723c8
Parents: 946456c
Author: Plamen Jeliazkov plamenj2...@gmail.com
Authored: Thu Feb 19 00:02:49 2015 -0800
Committer: Konstantin V Shvachko s...@apache.org
Committed: Thu Feb 19 00:02:49 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|   6 +-
 .../apache/hadoop/fs/slive/ArgumentParser.java  |   2 +
 .../apache/hadoop/fs/slive/ConfigExtractor.java |  59 ++
 .../apache/hadoop/fs/slive/ConfigMerger.java|  35 ++
 .../apache/hadoop/fs/slive/ConfigOption.java|   9 ++
 .../org/apache/hadoop/fs/slive/Constants.java   |   4 +-
 .../hadoop/fs/slive/OperationFactory.java   |   3 +
 .../org/apache/hadoop/fs/slive/TestSlive.java   |  27 +
 .../org/apache/hadoop/fs/slive/TruncateOp.java  | 114 +++
 9 files changed, 255 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a19820f2/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 7f4c3e7..fd40439 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -248,6 +248,10 @@ Release 2.7.0 - UNRELEASED
 
   NEW FEATURES
 
+MAPREDUCE-6227. DFSIO for truncate. (shv via yliu)
+
+MAPREDUCE-6228. Add truncate operation to SLive. (Plamen Jeliazkov via shv)
+
   IMPROVEMENTS
 
 MAPREDUCE-6149. Document override log4j.properties in MR job.
@@ -284,8 +288,6 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-5800. Use Job#getInstance instead of deprecated constructors
 (aajisaka)
 
-MAPREDUCE-6227. DFSIO for truncate. (shv via yliu)
-
 MAPREDUCE-6253. Update use of Iterator to Iterable. (Ray Chiang via 
devaraj)
 
 MAPREDUCE-5335. Rename Job Tracker terminology in ShuffleSchedulerImpl.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a19820f2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ArgumentParser.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ArgumentParser.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ArgumentParser.java
index 19a55ff..12df4dc 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ArgumentParser.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ArgumentParser.java
@@ -144,6 +144,7 @@ class ArgumentParser {
 cliopt.addOption(ConfigOption.DURATION);
 cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
 cliopt.addOption(ConfigOption.SLEEP_TIME);
+cliopt.addOption(ConfigOption.TRUNCATE_WAIT);
 cliopt.addOption(ConfigOption.FILES);
 cliopt.addOption(ConfigOption.DIR_SIZE);
 cliopt.addOption(ConfigOption.BASE_DIR);
@@ -167,6 +168,7 @@ class ArgumentParser {
 cliopt.addOption(ConfigOption.READ_SIZE);
 cliopt.addOption(ConfigOption.WRITE_SIZE);
 cliopt.addOption(ConfigOption.APPEND_SIZE);
+cliopt.addOption(ConfigOption.TRUNCATE_SIZE);
 cliopt.addOption(ConfigOption.RANDOM_SEED);
 cliopt.addOption(ConfigOption.QUEUE_NAME);
 cliopt.addOption(ConfigOption.HELP);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a19820f2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ConfigExtractor.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ConfigExtractor.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ConfigExtractor.java
index a03c812..ef4e436 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ConfigExtractor.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ConfigExtractor.java
@@ -131,6 

[41/50] [abbrv] hadoop git commit: YARN-2899. Run TestDockerContainerExecutorWithMocks on Linux only. Contributed by Ming Ma.

2015-02-23 Thread zhz
YARN-2899. Run TestDockerContainerExecutorWithMocks on Linux only. Contributed 
by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3fc81fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3fc81fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3fc81fa

Branch: refs/heads/HDFS-EC
Commit: a3fc81fa0e62287475920f8ae4019b4a4b5569a3
Parents: b6fc5b8
Author: cnauroth cnaur...@apache.org
Authored: Fri Feb 13 21:58:50 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:50 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt| 3 +++
 .../nodemanager/TestDockerContainerExecutorWithMocks.java  | 6 --
 2 files changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3fc81fa/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1644268..e9a7c58 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -596,6 +596,9 @@ Release 2.7.0 - UNRELEASED
 YARN-2847. Linux native container executor segfaults if default banned
 user detected (Olaf Flebbe via jlowe)
 
+YARN-2899. Run TestDockerContainerExecutorWithMocks on Linux only.
+(Ming Ma via cnauroth)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3fc81fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutorWithMocks.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutorWithMocks.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutorWithMocks.java
index fa8bfaf..3584fed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutorWithMocks.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutorWithMocks.java
@@ -69,7 +69,7 @@ public class TestDockerContainerExecutorWithMocks {
 
   @Before
   public void setup() {
-    assumeTrue(!Path.WINDOWS);
+    assumeTrue(Shell.LINUX);
     File f = new File("./src/test/resources/mock-container-executor");
 if(!FileUtil.canExecute(f)) {
   FileUtil.setExecutable(f, true);
@@ -101,7 +101,9 @@ public class TestDockerContainerExecutorWithMocks {
   @After
   public void tearDown() {
 try {
-  lfs.delete(workDir, true);
+  if (lfs != null) {
+lfs.delete(workDir, true);
+  }
 } catch (IOException e) {
   throw new RuntimeException(e);
 }
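
The assumeTrue call above relies on JUnit's Assume mechanism: when the assumption fails, the test is reported as skipped rather than failed. A minimal JUnit 4 sketch of the same gating (the OS check shown is a generic stand-in for Shell.LINUX):

    import static org.junit.Assume.assumeTrue;

    import org.junit.Before;
    import org.junit.Test;

    public class LinuxOnlyTest {
      @Before
      public void setup() {
        // Skips (not fails) every test in this class on non-Linux platforms.
        assumeTrue(System.getProperty("os.name").toLowerCase().contains("linux"));
      }

      @Test
      public void testSomethingLinuxSpecific() {
        // body only runs on Linux
      }
    }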



[44/52] [abbrv] hadoop git commit: HDFS-7652. Process block reports for erasure coded blocks. Contributed by Zhe Zhang

2015-02-23 Thread zhz
HDFS-7652. Process block reports for erasure coded blocks. Contributed by Zhe 
Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6caf9c77
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6caf9c77
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6caf9c77

Branch: refs/heads/HDFS-7285
Commit: 6caf9c77c60321897c536101a801fcdba8e981ff
Parents: 6d6fffc
Author: Zhe Zhang z...@apache.org
Authored: Mon Feb 9 10:27:14 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 23 11:19:51 2015 -0800

--
 .../server/blockmanagement/BlockIdManager.java|  8 
 .../hdfs/server/blockmanagement/BlockManager.java | 18 +-
 2 files changed, 21 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6caf9c77/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index c8b9d20..e7f8a05 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -211,4 +211,12 @@ public class BlockIdManager {
   .LAST_RESERVED_BLOCK_ID);
 generationStampV1Limit = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
   }
+
+  public static boolean isStripedBlockID(long id) {
+    return id < 0;
+  }
+
+  public static long convertToGroupID(long id) {
+    return id & (~(HdfsConstants.MAX_BLOCKS_IN_GROUP - 1));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6caf9c77/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 58a8b94..f4369b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1876,7 +1876,7 @@ public class BlockManager {
   break;
 }
 
-BlockInfoContiguous bi = blocksMap.getStoredBlock(b);
+BlockInfoContiguous bi = getStoredBlock(b);
 if (bi == null) {
   if (LOG.isDebugEnabled()) {
         LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
@@ -1981,7 +1981,7 @@ public class BlockManager {
 continue;
   }
   
-  BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(iblk);
+  BlockInfoContiguous storedBlock = getStoredBlock(iblk);
   // If block does not belong to any file, we are done.
   if (storedBlock == null) continue;
   
@@ -2123,7 +2123,7 @@ public class BlockManager {
 }
 
 // find block by blockId
-BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(block);
+BlockInfoContiguous storedBlock = getStoredBlock(block);
 if(storedBlock == null) {
   // If blocksMap does not contain reported block id,
   // the replica should be removed from the data-node.
@@ -2414,7 +2414,7 @@ public class BlockManager {
 DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
 if (block instanceof BlockInfoContiguousUnderConstruction) {
   //refresh our copy in case the block got completed in another thread
-  storedBlock = blocksMap.getStoredBlock(block);
+  storedBlock = getStoredBlock(block);
 } else {
   storedBlock = block;
 }
@@ -3360,7 +3360,15 @@ public class BlockManager {
   }
 
   public BlockInfoContiguous getStoredBlock(Block block) {
-return blocksMap.getStoredBlock(block);
+BlockInfoContiguous info = null;
+if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+      info = blocksMap.getStoredBlock(
+          new Block(BlockIdManager.convertToGroupID(block.getBlockId())));
+}
+if (info == null) {
+  info = blocksMap.getStoredBlock(block);
+}
+return info;
   }
 
   /** updates a block in under replication queue */
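
The two BlockIdManager helpers above are pure bit arithmetic: striped (erasure-coded) block ids are allocated from the negative id space, and masking off the low-order bits of a member block's id recovers the id of its group. A standalone illustration, assuming MAX_BLOCKS_IN_GROUP is a power of two (16 here is an assumed value for the demo):

    public class BlockGroupIdExample {
      static final int MAX_BLOCKS_IN_GROUP = 16;  // assumed; must be a power of two

      static boolean isStripedBlockID(long id) {
        return id < 0;  // striped ids live in the negative range
      }

      static long convertToGroupID(long id) {
        // ~(16 - 1) is ...11110000 in binary: it clears the 4 low bits,
        // which index the block within its group.
        return id & (~(MAX_BLOCKS_IN_GROUP - 1));
      }

      public static void main(String[] args) {
        long memberBlockId = -1024 + 5;  // the 6th block of the group at id -1024
        System.out.println(isStripedBlockID(memberBlockId));          // true
        System.out.println(convertToGroupID(memberBlockId) == -1024); // true
      }
    }

This is also why getStoredBlock in BlockManager first maps a reported block id to its group id before falling back to a direct lookup.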



[22/52] [abbrv] hadoop git commit: YARN-933. Fixed InvalidStateTransitonException at FINAL_SAVING state in RMApp. Contributed by Rohith Sharmaks

2015-02-23 Thread zhz
YARN-933. Fixed InvalidStateTransitonException at FINAL_SAVING state in RMApp. 
Contributed by Rohith Sharmaks


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0d9b939
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0d9b939
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0d9b939

Branch: refs/heads/HDFS-7285
Commit: c0d9b93953767608dfe429ddb9bd4c1c3bd3debf
Parents: d49ae72
Author: Jian He jia...@apache.org
Authored: Thu Feb 19 15:42:39 2015 -0800
Committer: Jian He jia...@apache.org
Committed: Thu Feb 19 15:42:39 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../rmapp/attempt/RMAppAttemptImpl.java |  2 ++
 .../attempt/TestRMAppAttemptTransitions.java| 30 
 3 files changed, 35 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0d9b939/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index aab3496..cac6680 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -623,6 +623,9 @@ Release 2.7.0 - UNRELEASED
 YARN-1615. Fix typos in description about delay scheduling. (Akira Ajisaka 
via 
 ozawa)
 
+YARN-933. Fixed InvalidStateTransitonException at FINAL_SAVING state in
+RMApp. (Rohith Sharmaks via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0d9b939/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 1a19eee..1be1727 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -354,6 +354,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
   EnumSet.of(
   RMAppAttemptEventType.UNREGISTERED,
   RMAppAttemptEventType.STATUS_UPDATE,
+  RMAppAttemptEventType.LAUNCHED,
+  RMAppAttemptEventType.LAUNCH_FAILED,
 // should be fixed to reject container allocate request at Final
 // Saving in scheduler
   RMAppAttemptEventType.CONTAINER_ALLOCATED,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0d9b939/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index 9f5ae79..c074ad9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ -919,6 +919,36 @@ public class TestRMAppAttemptTransitions {
 testAppAttemptFailedState(amContainer, diagnostics);
   }
   
+  @Test(timeout = 10000)
+  public void testLaunchedAtFinalSaving() {
+Container amContainer = allocateApplicationAttempt();
+
+// ALLOCATED -> FINAL_SAVING
+applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt
+.getAppAttemptId(), RMAppAttemptEventType.KILL));
+assertEquals(RMAppAttemptState.FINAL_SAVING,
+applicationAttempt.getAppAttemptState());
+
+// verify for both launched and launch_failed 

[37/50] [abbrv] hadoop git commit: HDFS-4265. BKJM doesn't take advantage of speculative reads. Contributed by Rakesh R.

2015-02-23 Thread zhz
HDFS-4265. BKJM doesn't take advantage of speculative reads. Contributed by 
Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08bc0c03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08bc0c03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08bc0c03

Branch: refs/heads/HDFS-EC
Commit: 08bc0c037b9b6761c84dfbb4e538b1b4e5f92bbc
Parents: 6384707
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri Feb 13 15:20:52 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:50 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../bkjournal/BookKeeperJournalManager.java |  32 +++-
 .../TestBookKeeperSpeculativeRead.java  | 167 +++
 3 files changed, 195 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08bc0c03/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 610d45c..1ec2bd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -962,6 +962,8 @@ Release 2.7.0 - UNRELEASED
   HDFS-7776. Adding additional unit tests for Quota By Storage Type.
   (Xiaoyu Yao via Arpit Agarwal)
 
+  HDFS-4625. BKJM doesn't take advantage of speculative reads. (Rakesh R
+  via aajisaka)
 
 Release 2.6.1 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08bc0c03/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
index 227be6b..51905c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
@@ -59,6 +59,7 @@ import 
org.apache.hadoop.contrib.bkjournal.BKJournalProtos.VersionProto;
 import com.google.protobuf.TextFormat;
 import static com.google.common.base.Charsets.UTF_8;
 
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import com.google.common.annotations.VisibleForTesting;
@@ -142,6 +143,15 @@ public class BookKeeperJournalManager implements 
JournalManager {
   public static final String BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT
 = "/ledgers/available";
 
+  public static final String BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_MS
+= "dfs.namenode.bookkeeperjournal.speculativeReadTimeoutMs";
+  public static final int BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_DEFAULT
+= 2000;
+
+  public static final String BKJM_BOOKKEEPER_READ_ENTRY_TIMEOUT_SEC
+= "dfs.namenode.bookkeeperjournal.readEntryTimeoutSec";
+  public static final int BKJM_BOOKKEEPER_READ_ENTRY_TIMEOUT_DEFAULT = 5;
+
   private ZooKeeper zkc;
   private final Configuration conf;
   private final BookKeeper bkc;
@@ -153,6 +163,8 @@ public class BookKeeperJournalManager implements 
JournalManager {
   private final int ensembleSize;
   private final int quorumSize;
   private final String digestpw;
+  private final int speculativeReadTimeout;
+  private final int readEntryTimeout;
   private final CountDownLatch zkConnectLatch;
   private final NamespaceInfo nsInfo;
   private boolean initialized = false;
@@ -172,6 +184,11 @@ public class BookKeeperJournalManager implements 
JournalManager {
BKJM_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT);
 quorumSize = conf.getInt(BKJM_BOOKKEEPER_QUORUM_SIZE,
  BKJM_BOOKKEEPER_QUORUM_SIZE_DEFAULT);
+speculativeReadTimeout = conf.getInt(
+ BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_MS,
+ BKJM_BOOKKEEPER_SPECULATIVE_READ_TIMEOUT_DEFAULT);
+readEntryTimeout = conf.getInt(BKJM_BOOKKEEPER_READ_ENTRY_TIMEOUT_SEC,
+ BKJM_BOOKKEEPER_READ_ENTRY_TIMEOUT_DEFAULT);
 
 ledgerPath = basePath + "/ledgers";
 String maxTxIdPath = basePath + "/maxtxid";
@@ -196,7 +213,10 @@ public class BookKeeperJournalManager implements 
JournalManager {
   }
 
   prepareBookKeeperEnv();
-  bkc = new BookKeeper(new ClientConfiguration(), zkc);
+  ClientConfiguration 
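
As a hedged sketch only (the keys mirror the constants added above; the
values are illustrative), the new BKJM read behaviour can be tuned through a
plain Hadoop Configuration:

    import org.apache.hadoop.conf.Configuration;

    public class BkjmReadTuning {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // fire a speculative read to another bookie after 1s (default 2000ms)
        conf.setInt("dfs.namenode.bookkeeperjournal.speculativeReadTimeoutMs",
            1000);
        // allow 10s per ledger entry read (default 5s)
        conf.setInt("dfs.namenode.bookkeeperjournal.readEntryTimeoutSec", 10);
        System.out.println(conf.getInt(
            "dfs.namenode.bookkeeperjournal.speculativeReadTimeoutMs", 2000));
      }
    }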

[41/52] [abbrv] hadoop git commit: YARN-2797. TestWorkPreservingRMRestart should use ParametrizedSchedulerTestBase. Contributed by Karthik Kambatla

2015-02-23 Thread zhz
YARN-2797. TestWorkPreservingRMRestart should use
ParametrizedSchedulerTestBase. Contributed by Karthik Kambatla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe7a3024
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe7a3024
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe7a3024

Branch: refs/heads/HDFS-7285
Commit: fe7a302473251b7310105a936edf220e401c613f
Parents: e3d2902
Author: Xuan xg...@apache.org
Authored: Sat Feb 21 19:17:29 2015 -0800
Committer: Xuan xg...@apache.org
Committed: Sat Feb 21 19:17:29 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../ParameterizedSchedulerTestBase.java | 12 +++
 .../TestWorkPreservingRMRestart.java| 79 +++-
 3 files changed, 25 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe7a3024/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1982688..3507420 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -318,6 +318,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3236. Cleanup RMAuthenticationFilter#AUTH_HANDLER_PROPERTY.
 (zhihai xu via xgong)
 
+YARN-2797. TestWorkPreservingRMRestart should use 
ParametrizedSchedulerTestBase
+(Karthik Kambatla via xgong)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe7a3024/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
index cfd1600..b099836 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
@@ -83,10 +83,22 @@ public abstract class ParameterizedSchedulerTestBase {
 out.println("<?xml version=\"1.0\"?>");
 out.println("<allocations>");
 out.println("<queueMaxAMShareDefault>-1.0</queueMaxAMShareDefault>");
+out.println("<defaultQueueSchedulingPolicy>fair</defaultQueueSchedulingPolicy>");
+out.println("<queue name=\"root\">");
+out.println("  <schedulingPolicy>drf</schedulingPolicy>");
+out.println("  <weight>1.0</weight>");
+out.println("  <fairSharePreemptionTimeout>100</fairSharePreemptionTimeout>");
+out.println("  <minSharePreemptionTimeout>120</minSharePreemptionTimeout>");
+out.println("  <fairSharePreemptionThreshold>.5</fairSharePreemptionThreshold>");
+out.println("</queue>");
 out.println("</allocations>");
 out.close();
 
 conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
 conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, FS_ALLOC_FILE);
   }
+
+  public SchedulerType getSchedulerType() {
+return schedulerType;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe7a3024/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index a9caf77..3033496 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -97,23 +97,24 @@ import com.google.common.base.Supplier;
 
 @SuppressWarnings({"rawtypes", 

[26/52] [abbrv] hadoop git commit: YARN-2799. Cleanup TestLogAggregationService based on the change in YARN-90. Contributed by Zhihai Xu

2015-02-23 Thread zhz
YARN-2799. Cleanup TestLogAggregationService based on the change in YARN-90. 
Contributed by Zhihai Xu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c33ae271
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c33ae271
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c33ae271

Branch: refs/heads/HDFS-7285
Commit: c33ae271c24f0770c9735ccd2086cafda4f4e0b2
Parents: a64dd3d
Author: Junping Du junping...@apache.org
Authored: Fri Feb 20 09:43:39 2015 -0800
Committer: Junping Du junping...@apache.org
Committed: Fri Feb 20 09:43:39 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../TestLogAggregationService.java  | 25 +++-
 2 files changed, 6 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c33ae271/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8ec2409..e71da2d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -307,6 +307,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3076. Add API/Implementation to YarnClient to retrieve label-to-node 
 mapping. (Varun Saxena via wangda)
 
+YARN-2799. Cleanup TestLogAggregationService based on the change in 
YARN-90.
+(Zhihai Xu via junping_du)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c33ae271/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index 7d911e9..901e45a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -248,7 +248,6 @@ public class TestLogAggregationService extends 
BaseContainerManagerTest {
 };
 
 checkEvents(appEventHandler, expectedEvents, true, "getType", "getApplicationID");
-dispatcher.stop();
   }
 
   @Test
@@ -295,7 +294,6 @@ public class TestLogAggregationService extends 
BaseContainerManagerTest {
 ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
 };
 checkEvents(appEventHandler, expectedEvents, true, "getType", "getApplicationID");
-dispatcher.stop();
 logAggregationService.close();
   }
 
@@ -308,10 +306,7 @@ public class TestLogAggregationService extends 
BaseContainerManagerTest {
 this.remoteRootLogDir.getAbsolutePath());
 
 String[] fileNames = new String[] { "stdout", "stderr", "syslog" };
-DrainDispatcher dispatcher = createDispatcher();
-EventHandler<ApplicationEvent> appEventHandler = mock(EventHandler.class);
-dispatcher.register(ApplicationEventType.class, appEventHandler);
-
+
 LogAggregationService logAggregationService =
 new LogAggregationService(dispatcher, this.context, this.delSrvc,
   super.dirsHandler);
@@ -441,7 +436,6 @@ public class TestLogAggregationService extends 
BaseContainerManagerTest {
 ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
 };
 checkEvents(appEventHandler, expectedFinishedEvents, false, "getType", "getApplicationID");
-dispatcher.stop();
   }
   
   @Test
@@ -518,8 +512,7 @@ public class TestLogAggregationService extends 
BaseContainerManagerTest {
 File aNewFile = new File(String.valueOf("tmp" + System.currentTimeMillis()));
 this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, 
 aNewFile.getAbsolutePath());
-
-DrainDispatcher dispatcher = createDispatcher();
+
 LogAggregationService logAggregationService = spy(
 new LogAggregationService(dispatcher, this.context, this.delSrvc,
   super.dirsHandler));
@@ -590,6 +583,7 @@ public class TestLogAggregationService extends 
BaseContainerManagerTest {
 

[29/52] [abbrv] hadoop git commit: HDFS-7773. Additional metrics in HDFS to be accessed via jmx. Contributed by Anu Engineer.

2015-02-23 Thread zhz
HDFS-7773. Additional metrics in HDFS to be accessed via jmx. Contributed by 
Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02e7dec7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02e7dec7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02e7dec7

Branch: refs/heads/HDFS-7285
Commit: 02e7dec79d2d4f2b801435343219d8fb53ec931f
Parents: 8c6ae0d
Author: cnauroth cnaur...@apache.org
Authored: Fri Feb 20 12:37:48 2015 -0800
Committer: cnauroth cnaur...@apache.org
Committed: Fri Feb 20 12:37:48 2015 -0800

--
 .../hadoop-common/src/site/markdown/Metrics.md  |  5 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/datanode/BlockReceiver.java |  1 +
 .../hdfs/server/datanode/DataXceiver.java   | 26 +++
 .../datanode/metrics/DataNodeMetrics.java   | 36 ++--
 .../namenode/metrics/NameNodeMetrics.java   | 25 +++
 .../server/datanode/TestDataNodeMetrics.java| 45 
 .../namenode/metrics/TestNameNodeMetrics.java   | 20 +
 8 files changed, 148 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02e7dec7/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 6953c3b..0e0fc09 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -191,6 +191,7 @@ Each metrics record contains tags such as ProcessName, 
SessionId, and Hostname a
 | `GetImageAvgTime` | Average fsimage download time in milliseconds |
 | `PutImageNumOps` | Total number of fsimage uploads to SecondaryNameNode |
 | `PutImageAvgTime` | Average fsimage upload time in milliseconds |
+| `TotalFileOps`| Total number of file operations performed |
 
 FSNamesystem
 
@@ -314,6 +315,10 @@ Each metrics record contains tags such as SessionId and 
Hostname as additional i
 | `SendDataPacketBlockedOnNetworkNanosAvgTime` | Average waiting time of 
sending packets in nanoseconds |
 | `SendDataPacketTransferNanosNumOps` | Total number of sending packets |
 | `SendDataPacketTransferNanosAvgTime` | Average transfer time of sending 
packets in nanoseconds |
+| `TotalWriteTime`| Total number of milliseconds spent on write operation |
+| `TotalReadTime` | Total number of milliseconds spent on read operation |
+| `RemoteBytesRead` | Number of bytes read by remote clients |
+| `RemoteBytesWritten` | Number of bytes written by remote clients |
 
 yarn context
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02e7dec7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7d9d0ea..5c472a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -663,6 +663,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7772. Document hdfs balancer -exclude/-include option in
 HDFSCommands.html (Xiaoyu Yao via cnauroth)
 
+HDFS-7773. Additional metrics in HDFS to be accessed via jmx.
+(Anu Engineer via cnauroth)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02e7dec7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 368d80d..1db2c78 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -658,6 +658,7 @@ class BlockReceiver implements Closeable {
   replicaInfo.setLastChecksumAndDataLen(offsetInBlock, lastCrc);
 
   datanode.metrics.incrBytesWritten(len);
+  datanode.metrics.incrTotalWriteTime(duration);
 
   manageWriterOsCache(offsetInBlock);
 }
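
For context, incrTotalWriteTime follows the usual metrics2 pattern: a
@Metric-annotated mutable counter bumped with the elapsed time. A hedged
sketch of that pattern (class and metric names are illustrative, not the
DataNodeMetrics source):

    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;

    @Metrics(context = "demo")
    public class WriteTimeSketch {
      // instantiated by the metrics system when the source is registered
      @Metric("Total ms spent on writes") MutableCounterLong totalWriteTime;

      void onPacketFlushed(long durationMs) {
        totalWriteTime.incr(durationMs); // same shape as incrTotalWriteTime
      }

      public static void main(String[] args) {
        DefaultMetricsSystem.initialize("demo");
        WriteTimeSketch s = DefaultMetricsSystem.instance()
            .register("WriteTimeSketch", "sketch source", new WriteTimeSketch());
        s.onPacketFlushed(12);
      }
    }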

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02e7dec7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
--
diff --git 

[21/52] [abbrv] hadoop git commit: YARN-3076. Add API/Implementation to YarnClient to retrieve label-to-node mapping (Varun Saxena via wangda)

2015-02-23 Thread zhz
YARN-3076. Add API/Implementation to YarnClient to retrieve label-to-node 
mapping (Varun Saxena via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d49ae725
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d49ae725
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d49ae725

Branch: refs/heads/HDFS-7285
Commit: d49ae725d5fa3eecf879ac42c42a368dd811f854
Parents: f0f2992
Author: Wangda Tan wan...@apache.org
Authored: Thu Feb 19 11:00:57 2015 -0800
Committer: Wangda Tan wan...@apache.org
Committed: Thu Feb 19 11:00:57 2015 -0800

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |  12 ++
 .../hadoop/mapred/TestClientRedirect.java   |   8 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/api/ApplicationClientProtocol.java |  18 ++
 .../GetLabelsToNodesRequest.java|  41 +
 .../GetLabelsToNodesResponse.java   |  45 +
 .../main/proto/applicationclient_protocol.proto |   1 +
 .../src/main/proto/yarn_protos.proto|   5 +
 .../src/main/proto/yarn_service_protos.proto|   8 +
 .../hadoop/yarn/client/api/YarnClient.java  |  31 
 .../yarn/client/api/impl/YarnClientImpl.java|  15 ++
 .../yarn/client/api/impl/TestYarnClient.java|  75 +++-
 .../ApplicationClientProtocolPBClientImpl.java  |  19 ++
 .../ApplicationClientProtocolPBServiceImpl.java |  21 +++
 .../impl/pb/GetLabelsToNodesRequestPBImpl.java  | 121 
 .../impl/pb/GetLabelsToNodesResponsePBImpl.java | 184 +++
 .../hadoop/yarn/api/TestPBImplRecords.java  |  12 ++
 .../server/resourcemanager/ClientRMService.java |  15 ++
 .../resourcemanager/TestClientRMService.java|  75 
 19 files changed, 708 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d49ae725/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 06667ee..82e8bdb 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -440,6 +440,18 @@ public class ResourceMgrDelegate extends YarnClient {
   }
 
   @Override
+  public Map<String, Set<NodeId>> getLabelsToNodes() throws YarnException,
+  IOException {
+return client.getLabelsToNodes();
+  }
+
+  @Override
+  public Map<String, Set<NodeId>> getLabelsToNodes(Set<String> labels)
+  throws YarnException, IOException {
+return client.getLabelsToNodes(labels);
+  }
+
+  @Override
   public Set<String> getClusterNodeLabels()
   throws YarnException, IOException {
 return client.getClusterNodeLabels();
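
A short usage sketch of the new client API (the cluster configuration is
assumed to be on the classpath; label names are whatever the cluster
defines):

    import java.util.Map;
    import java.util.Set;
    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class LabelsToNodesDemo {
      public static void main(String[] args) throws Exception {
        YarnClient client = YarnClient.createYarnClient();
        client.init(new YarnConfiguration());
        client.start();
        try {
          Map<String, Set<NodeId>> byLabel = client.getLabelsToNodes();
          for (Map.Entry<String, Set<NodeId>> e : byLabel.entrySet()) {
            System.out.println(e.getKey() + " -> " + e.getValue());
          }
        } finally {
          client.stop();
        }
      }
    }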

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d49ae725/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index 0af5a71..bb00b19 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -90,6 +90,8 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
 import 

[17/52] [abbrv] hadoop git commit: HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/946456c6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index aad63d3..ca204a6 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -23,6 +23,7 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Timer;
 import java.util.TimerTask;
@@ -711,7 +712,8 @@ public class JobHistoryEventHandler extends AbstractService
   private void processEventForTimelineServer(HistoryEvent event, JobId jobId,
   long timestamp) {
 TimelineEvent tEvent = new TimelineEvent();
-tEvent.setEventType(event.getEventType().name().toUpperCase());
+tEvent.setEventType(
+event.getEventType().name().toUpperCase(Locale.ENGLISH));
 tEvent.setTimestamp(timestamp);
 TimelineEntity tEntity = new TimelineEntity();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/946456c6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index 53f21db..440ff49 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -227,7 +227,7 @@ public class AppController extends Controller implements 
AMParams {
   try {
 String tt = $(TASK_TYPE);
 tt = tt.isEmpty() ? "All" : StringUtils.capitalize(MRApps.taskType(tt).
-toString().toLowerCase(Locale.US));
+toString().toLowerCase(Locale.ENGLISH));
 setTitle(join(tt, " Tasks for ", $(JOB_ID)));
   } catch (Exception e) {
 LOG.error("Failed to render tasks page with task type : 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/946456c6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
index 553ba70..e0c4773 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.mapreduce;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Locale;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JobPriority;
@@ -314,7 +315,7 @@ public class TypeConverter {
   QueueState state) {
 org.apache.hadoop.mapreduce.QueueState qState =
   org.apache.hadoop.mapreduce.QueueState.getState(
-state.toString().toLowerCase());
+state.toString().toLowerCase(Locale.ENGLISH));
 return qState;
   }
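
The reason Locale.ENGLISH matters: under a Turkish default locale, "I"
lower-cases to dotless U+0131, so case conversions used for enum or name
lookups can silently produce unmatchable strings. A self-contained
demonstration:

    import java.util.Locale;

    public class LocaleCaseDemo {
      public static void main(String[] args) {
        Locale turkish = new Locale("tr", "TR");
        // prints "tıtle" -- the dotless ı breaks name-based lookups
        System.out.println("TITLE".toLowerCase(turkish));
        // prints "title" regardless of the JVM's default locale
        System.out.println("TITLE".toLowerCase(Locale.ENGLISH));
      }
    }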
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/946456c6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
--
diff --git 

svn commit: r1661744 - /hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

2015-02-23 Thread wheat9
Author: wheat9
Date: Mon Feb 23 18:54:17 2015
New Revision: 1661744

URL: http://svn.apache.org/r1661744
Log:
Add wheat9 as the Hadoop PMC member.


Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1661744&r1=1661743&r2=1661744&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Mon 
Feb 23 18:54:17 2015
@@ -455,6 +455,14 @@
 <td>-8</td>
</tr>
 
+   <tr>
+ <td>wheat9</td>
+ <td><a href="http://haohui.me">Haohui Mai</a></td>
+ <td>Hortonworks</td>
+ <td></td>
+ <td>-8</td>
+   </tr>
+
 <tr>
 <td>zjshen</td>
 <td><a href="http://people.apache.org/~zjshen">Zhijie Shen</a></td>
@@ -1149,7 +1157,7 @@
 
<tr>
 <td>wheat9</td>
- <td>Haohui Mai</td>
+ <td><a href="http://haohui.me">Haohui Mai</a></td>
 <td>Hortonworks</td>
 <td></td>
 <td>-8</td>




[45/50] [abbrv] hadoop git commit: MAPREDUCE-6225. Fix new findbug warnings in hadoop-mapreduce-client-core. Contributed by Varun Saxena

2015-02-23 Thread zhz
MAPREDUCE-6225. Fix new findbug warnings in hadoop-mapreduce-client-core. 
Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49204fcf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49204fcf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49204fcf

Branch: refs/heads/HDFS-EC
Commit: 49204fcf18112f4879f05fea03dd807da2299b87
Parents: c07f1de
Author: Junping Du junping...@apache.org
Authored: Mon Feb 16 09:38:05 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:51 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/mapred/IndexCache.java|  2 +-
 .../org/apache/hadoop/mapred/TaskLogAppender.java|  2 +-
 .../hadoop/mapred/lib/CombineFileRecordReader.java   |  5 -
 .../org/apache/hadoop/mapreduce/JobSubmitter.java| 15 ++-
 .../mapreduce/lib/fieldsel/FieldSelectionHelper.java |  2 +-
 .../mapreduce/lib/input/CombineFileRecordReader.java |  5 -
 .../mapreduce/security/SecureShuffleUtils.java   | 15 ++-
 .../hadoop/mapreduce/task/reduce/InMemoryReader.java |  4 +---
 .../hadoop/mapreduce/util/ResourceBundles.java   |  2 +-
 10 files changed, 24 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49204fcf/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index bb9e105..9ef7a32 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -294,6 +294,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6256. Removed unused private methods in o.a.h.mapreduce.Job.java.
 (Naganarasimha G R via ozawa)
 
+MAPREDUCE-6225. Fix new findbug warnings in hadoop-mapreduce-client-core. 
+(Varun Saxena via junping_du)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49204fcf/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexCache.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexCache.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexCache.java
index 54add3a..c3db951 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexCache.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexCache.java
@@ -145,7 +145,7 @@ class IndexCache {
*/
   public void removeMap(String mapId) {
 IndexInformation info = cache.get(mapId);
-if (info == null || ((info != null)  isUnderConstruction(info))) {
-if (info == null || ((info != null) && isUnderConstruction(info))) {
   return;
 }
 info = cache.remove(mapId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49204fcf/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java
index 2162a26..d10b764 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java
@@ -75,7 +75,7 @@ public class TaskLogAppender extends FileAppender implements 
Flushable {
 
 if (maxEvents == null) {
   String propValue = System.getProperty(LOGSIZE_PROPERTY, "0");
-  setTotalLogFileSize(Long.valueOf(propValue));
+  setTotalLogFileSize(Long.parseLong(propValue));
 }
   }
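
The Long.valueOf -> Long.parseLong change above is the classic findbugs
boxing point: valueOf returns a boxed Long that is immediately unboxed when
handed to a long parameter, while parseLong yields the primitive directly. A
minimal illustration (names hypothetical):

    public class ParseDemo {
      static void setTotalLogFileSize(long bytes) {
        System.out.println("log cap = " + bytes);
      }

      public static void main(String[] args) {
        String propValue = System.getProperty("demo.logsize", "0");
        setTotalLogFileSize(Long.parseLong(propValue)); // primitive, no boxing
        setTotalLogFileSize(Long.valueOf(propValue));   // boxes, then unboxes
      }
    }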
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49204fcf/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java
--
diff --git 

[49/50] [abbrv] hadoop git commit: YARN-3203. Correct a log message in AuxServices. Contributed by Brahma Reddy Battula.

2015-02-23 Thread zhz
YARN-3203. Correct a log message in AuxServices. Contributed by Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da1dfed6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da1dfed6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da1dfed6

Branch: refs/heads/HDFS-EC
Commit: da1dfed6195993dda2fd72ea8d00c55b7f8892b4
Parents: 7f09f8d
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon Feb 16 23:55:58 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:51 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../yarn/server/nodemanager/containermanager/AuxServices.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da1dfed6/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e00e447..7161dc3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -295,6 +295,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3182. Cleanup switch statement in ApplicationMasterLauncher#handle().
 (Ray Chiang via ozawa)
 
+YARN-3203. Correct a log message in AuxServices. (Brahma Reddy Battula 
+via ozawa)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da1dfed6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index bf02679..fb6f79b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -128,7 +128,7 @@ public class AuxServices extends AbstractService
 // TODO better use s.getName()?
 if(!sName.equals(s.getName())) {
   LOG.warn("The Auxilurary Service named '"+sName+"' in the "
-  +"configuration is for class "+sClass+" which has "
+  +"configuration is for "+sClass+" which has "
   +"a name of '"+s.getName()+"'. Because these are "
   +"not the same tools trying to send ServiceData and read "
   +"Service Meta Data may have issues unless the refer to "



[04/50] [abbrv] hadoop git commit: YARN-3158. Correct log messages in ResourceTrackerService. Contributed by Varun Saxena

2015-02-23 Thread zhz
YARN-3158. Correct log messages in ResourceTrackerService. Contributed
by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eebef84a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eebef84a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eebef84a

Branch: refs/heads/HDFS-EC
Commit: eebef84a56b9f1b796d80edf1a2c26bdb8c63586
Parents: 4b1b229
Author: Xuan xg...@apache.org
Authored: Thu Feb 12 16:42:15 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:47 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/server/resourcemanager/ResourceTrackerService.java  | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eebef84a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bb19394..7117fbd 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -284,6 +284,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3124. Fixed CS LeafQueue/ParentQueue to use QueueCapacities to track
 capacities-by-label. (Wangda Tan via jianhe)
 
+YARN-3158. Correct log messages in ResourceTrackerService.
+(Varun Saxena via xgong)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eebef84a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index 4beb895..61a0349 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -209,14 +209,14 @@ public class ResourceTrackerService extends 
AbstractService implements
 if (rmApp == null) {
   LOG.error("Received finished container : "
   + containerStatus.getContainerId()
-  + "for unknown application " + appAttemptId.getApplicationId()
+  + " for unknown application " + appAttemptId.getApplicationId()
   + " Skipping.");
   return;
 }
 
 if (rmApp.getApplicationSubmissionContext().getUnmanagedAM()) {
   if (LOG.isDebugEnabled()) {
-LOG.debug("Ignoring container completion status for unmanaged AM"
+LOG.debug("Ignoring container completion status for unmanaged AM "
 + rmApp.getApplicationId());
   }
   return;



[28/50] [abbrv] hadoop git commit: HDFS-7668. Convert site documentation from apt to markdown (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
HDFS-7668. Convert site documentation from apt to markdown (Masatake Iwasaki 
via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a45ef2b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a45ef2b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a45ef2b6

Branch: refs/heads/HDFS-EC
Commit: a45ef2b6440844a4cefbbb3094921659cd5e80a0
Parents: 812817c
Author: Allen Wittenauer a...@apache.org
Authored: Thu Feb 12 18:19:45 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:48 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |3 +
 .../src/site/apt/ArchivalStorage.apt.vm |  233 --
 .../site/apt/CentralizedCacheManagement.apt.vm  |  344 ---
 .../src/site/apt/ExtendedAttributes.apt.vm  |   97 -
 .../src/site/apt/FaultInjectFramework.apt.vm|  312 ---
 .../hadoop-hdfs/src/site/apt/Federation.apt.vm  |  339 ---
 .../src/site/apt/HDFSCommands.apt.vm|  797 --
 .../site/apt/HDFSHighAvailabilityWithNFS.apt.vm |  859 --
 .../site/apt/HDFSHighAvailabilityWithQJM.apt.vm |  816 --
 .../hadoop-hdfs/src/site/apt/HdfsDesign.apt.vm  |  510 
 .../src/site/apt/HdfsEditsViewer.apt.vm |  104 -
 .../src/site/apt/HdfsImageViewer.apt.vm |  247 --
 .../src/site/apt/HdfsMultihoming.apt.vm |  145 -
 .../src/site/apt/HdfsNfsGateway.apt.vm  |  364 ---
 .../src/site/apt/HdfsPermissionsGuide.apt.vm|  438 ---
 .../src/site/apt/HdfsQuotaAdminGuide.apt.vm |  116 -
 .../src/site/apt/HdfsUserGuide.apt.vm   |  556 
 .../hadoop-hdfs/src/site/apt/LibHdfs.apt.vm |  101 -
 .../src/site/apt/SLGUserGuide.apt.vm|  195 --
 .../src/site/apt/ShortCircuitLocalReads.apt.vm  |  112 -
 .../src/site/apt/TransparentEncryption.apt.vm   |  290 --
 .../hadoop-hdfs/src/site/apt/ViewFs.apt.vm  |  304 --
 .../hadoop-hdfs/src/site/apt/WebHDFS.apt.vm | 2628 --
 .../src/site/markdown/ArchivalStorage.md|  160 ++
 .../site/markdown/CentralizedCacheManagement.md |  268 ++
 .../src/site/markdown/ExtendedAttributes.md |   98 +
 .../src/site/markdown/FaultInjectFramework.md   |  254 ++
 .../hadoop-hdfs/src/site/markdown/Federation.md |  254 ++
 .../src/site/markdown/HDFSCommands.md   |  505 
 .../markdown/HDFSHighAvailabilityWithNFS.md |  678 +
 .../markdown/HDFSHighAvailabilityWithQJM.md |  642 +
 .../hadoop-hdfs/src/site/markdown/HdfsDesign.md |  240 ++
 .../src/site/markdown/HdfsEditsViewer.md|   69 +
 .../src/site/markdown/HdfsImageViewer.md|  172 ++
 .../src/site/markdown/HdfsMultihoming.md|  127 +
 .../src/site/markdown/HdfsNfsGateway.md |  254 ++
 .../src/site/markdown/HdfsPermissionsGuide.md   |  284 ++
 .../src/site/markdown/HdfsQuotaAdminGuide.md|   92 +
 .../src/site/markdown/HdfsUserGuide.md  |  375 +++
 .../hadoop-hdfs/src/site/markdown/LibHdfs.md|   92 +
 .../src/site/markdown/SLGUserGuide.md   |  157 ++
 .../src/site/markdown/ShortCircuitLocalReads.md |   87 +
 .../src/site/markdown/TransparentEncryption.md  |  268 ++
 .../hadoop-hdfs/src/site/markdown/ViewFs.md |  242 ++
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 1939 +
 45 files changed, 7260 insertions(+), 9907 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a45ef2b6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9117fc8..bf4c9de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -141,6 +141,9 @@ Trunk (Unreleased)
 
 HDFS-7322. deprecate sbin/hadoop-daemon.sh (aw)
 
+HDFS-7668. Convert site documentation from apt to markdown (Masatake
+Iwasaki via aw)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a45ef2b6/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm
deleted file mode 100644
index 5336ea3..000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm
+++ /dev/null
@@ -1,233 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is 

[07/50] [abbrv] hadoop git commit: HADOOP-9869. Configuration.getSocketAddr()/getEnum() should use getTrimmed(). Contributed by Tsuyoshi Ozawa.

2015-02-23 Thread zhz
HADOOP-9869. Configuration.getSocketAddr()/getEnum() should use getTrimmed(). 
Contributed by Tsuyoshi Ozawa.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b12bedef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b12bedef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b12bedef

Branch: refs/heads/HDFS-EC
Commit: b12bedef6ed6b00ffd48efbba69b58c1db73d127
Parents: 90741b1
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu Feb 12 15:50:48 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:47 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../org/apache/hadoop/conf/Configuration.java   |  6 ++--
 .../apache/hadoop/conf/TestConfiguration.java   | 29 +++-
 3 files changed, 35 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b12bedef/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7078d42..c11e340 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -597,6 +597,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11586. Update use of Iterator to Iterable in
 AbstractMetricsContext.java. (Ray Chiang via aajisaka)
 
+HADOOP-9869. Configuration.getSocketAddr()/getEnum() should use
+getTrimmed(). (Tsuyoshi Ozawa via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b12bedef/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index ea0d3a6..02654b7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -1491,13 +1491,14 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
 
   /**
* Return value matching this enumerated type.
+   * Note that the returned value is trimmed by this method.
* @param name Property name
* @param defaultValue Value returned if no mapping exists
* @throws IllegalArgumentException If mapping is illegal for the type
* provided
*/
   public <T extends Enum<T>> T getEnum(String name, T defaultValue) {
-final String val = get(name);
+final String val = getTrimmed(name);
 return null == val
   ? defaultValue
   : Enum.valueOf(defaultValue.getDeclaringClass(), val);
@@ -1594,6 +1595,7 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
* Get the value of the <code>name</code> property as a <code>Pattern</code>.
* If no such property is specified, or if the specified value is not a valid
* <code>Pattern</code>, then <code>defaultValue</code> is returned.
+   * Note that the returned value is NOT trimmed by this method.
*
* @param name property name
* @param defaultValue default value
@@ -2044,7 +2046,7 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
*/
   public InetSocketAddress getSocketAddr(
   String name, String defaultAddress, int defaultPort) {
-final String address = get(name, defaultAddress);
+final String address = getTrimmed(name, defaultAddress);
 return NetUtils.createSocketAddr(address, defaultPort, name);
   }
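
A sketch of the behaviour this fixes (the key names and padded values are
illustrative): before the patch, stray whitespace in a value broke
getEnum()/getSocketAddr(); with getTrimmed() both now tolerate it.

    import org.apache.hadoop.conf.Configuration;

    public class TrimDemo {
      enum Color { RED, BLUE }

      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("demo.color", " RED ");           // note the padding
        conf.set("demo.addr", " localhost:8020 ");
        System.out.println(conf.getEnum("demo.color", Color.BLUE)); // RED
        System.out.println(conf.getSocketAddr("demo.addr", "localhost:9000", 9000));
      }
    }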
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b12bedef/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index b84045d..a367553 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -56,6 +56,7 @@ public class TestConfiguration extends TestCase {
   private Configuration conf;
   final static String CONFIG = new 
File("./test-config-TestConfiguration.xml").getAbsolutePath();
   final static String CONFIG2 = new 
File("./test-config2-TestConfiguration.xml").getAbsolutePath();
+  final static String 

[06/50] [abbrv] hadoop git commit: YARN-3104. Fixed RM to not generate new AMRM tokens on every heartbeat between rolling and activation. Contributed by Jason Lowe

2015-02-23 Thread zhz
YARN-3104. Fixed RM to not generate new AMRM tokens on every heartbeat between 
rolling and activation. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b1b2298
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b1b2298
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b1b2298

Branch: refs/heads/HDFS-EC
Commit: 4b1b2298b1229940eee89fc26cc9a2e2295de013
Parents: b12bede
Author: Jian He jia...@apache.org
Authored: Thu Feb 12 16:02:24 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:47 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../ApplicationMasterService.java   | 16 ---
 .../rmapp/attempt/RMAppAttemptImpl.java | 31 --
 .../yarn/server/resourcemanager/MockAM.java | 28 
 .../security/TestAMRMTokens.java| 45 ++--
 5 files changed, 90 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b1b2298/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b91281e..bb19394 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -573,6 +573,9 @@ Release 2.7.0 - UNRELEASED
 YARN-1580. Documentation error regarding 
container-allocation.expiry-interval-ms 
 (Brahma Reddy Battula via junping_du)
 
+YARN-3104. Fixed RM to not generate new AMRM tokens on every heartbeat
+between rolling and activation. (Jason Lowe via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b1b2298/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index 6650cf2..1c7f987 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -588,16 +588,20 @@ public class ApplicationMasterService extends 
AbstractService implements
   if (nextMasterKey != null
    && nextMasterKey.getMasterKey().getKeyId() != amrmTokenIdentifier
 .getKeyId()) {
-Token<AMRMTokenIdentifier> amrmToken =
-rmContext.getAMRMTokenSecretManager().createAndGetAMRMToken(
-  appAttemptId);
-((RMAppAttemptImpl)appAttempt).setAMRMToken(amrmToken);
+RMAppAttemptImpl appAttemptImpl = (RMAppAttemptImpl)appAttempt;
+Token<AMRMTokenIdentifier> amrmToken = appAttempt.getAMRMToken();
+if (nextMasterKey.getMasterKey().getKeyId() !=
+appAttemptImpl.getAMRMTokenKeyId()) {
+  LOG.info("The AMRMToken has been rolled-over. Send new AMRMToken back"
+  + " to application: " + applicationId);
+  amrmToken = rmContext.getAMRMTokenSecretManager()
+  .createAndGetAMRMToken(appAttemptId);
+  appAttemptImpl.setAMRMToken(amrmToken);
+}
 allocateResponse.setAMRMToken(org.apache.hadoop.yarn.api.records.Token
   .newInstance(amrmToken.getIdentifier(), amrmToken.getKind()
 .toString(), amrmToken.getPassword(), amrmToken.getService()
 .toString()));
-LOG.info("The AMRMToken has been rolled-over. Send new AMRMToken back"
-+ " to application: " + applicationId);
   }
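
The core idea of the fix, reduced to a hedged standalone sketch (names
hypothetical): re-issue a token only when the stored key id actually lags
the rolled master key, rather than on every heartbeat until activation.

    public class TokenRollGuard {
      private int attemptKeyId = 1;

      void onHeartbeat(int currentMasterKeyId) {
        if (currentMasterKeyId != attemptKeyId) {
          attemptKeyId = currentMasterKeyId; // roll exactly once per key change
          System.out.println("issue new AMRMToken for key " + currentMasterKeyId);
        } // same key id: the client keeps the token it already holds
      }

      public static void main(String[] args) {
        TokenRollGuard guard = new TokenRollGuard();
        guard.onHeartbeat(2); // key rolled: one new token
        guard.onHeartbeat(2); // later heartbeats: nothing re-issued
      }
    }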
 
   /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b1b2298/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 

[48/50] [abbrv] hadoop git commit: YARN-1299. Improve a log message in AppSchedulingInfo by adding application id. Contributed by Ashutosh Jindal and devaraj.

2015-02-23 Thread zhz
YARN-1299. Improve a log message in AppSchedulingInfo by adding application id. 
Contributed by Ashutosh Jindal and devaraj.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c6c2e17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c6c2e17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c6c2e17

Branch: refs/heads/HDFS-EC
Commit: 6c6c2e17ab23cc3a61ffa1a06341b9d737d30844
Parents: da1dfed
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Feb 17 01:48:22 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:51 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java  | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c6c2e17/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7161dc3..bb145eb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -298,6 +298,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3203. Correct a log message in AuxServices. (Brahma Reddy Battula 
 via ozawa)
 
+YARN-1299. Improve a log message in AppSchedulingInfo by adding 
application 
+id. (Ashutosh Jindal and devaraj via ozawa)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c6c2e17/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 3ade7f7..a9a459f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -176,7 +176,8 @@ public class AppSchedulingInfo {
 
 // Similarly, deactivate application?
 if (request.getNumContainers() <= 0) {
-  LOG.info("checking for deactivate... ");
+  LOG.info("checking for deactivate of application :"
+  + this.applicationId);
   checkForDeactivation();
 }
 



[22/50] [abbrv] hadoop git commit: HDFS-7668. Convert site documentation from apt to markdown (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a45ef2b6/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
new file mode 100644
index 000..2038401
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -0,0 +1,160 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+Archival Storage, SSD & Memory
+==============================
+
+* [Archival Storage, SSD & Memory](#Archival_Storage_SSD__Memory)
+* [Introduction](#Introduction)
+* [Storage Types and Storage Policies](#Storage_Types_and_Storage_Policies)
+* [Storage Types: ARCHIVE, DISK, SSD and 
RAM\_DISK](#Storage_Types:_ARCHIVE_DISK_SSD_and_RAM_DISK)
+* [Storage Policies: Hot, Warm, Cold, All\_SSD, One\_SSD and 
Lazy\_Persist](#Storage_Policies:_Hot_Warm_Cold_All_SSD_One_SSD_and_Lazy_Persist)
+* [Storage Policy Resolution](#Storage_Policy_Resolution)
+* [Configuration](#Configuration)
+* [Mover - A New Data Migration Tool](#Mover_-_A_New_Data_Migration_Tool)
+* [Storage Policy Commands](#Storage_Policy_Commands)
+* [List Storage Policies](#List_Storage_Policies)
+* [Set Storage Policy](#Set_Storage_Policy)
+* [Get Storage Policy](#Get_Storage_Policy)
+
+Introduction
+
+
+*Archival Storage* is a solution to decouple growing storage capacity from 
compute capacity. Nodes with higher density and less expensive storage with low 
compute power are becoming available and can be used as cold storage in the 
clusters. Based on policy, data can be moved from hot storage to cold storage. 
Adding more nodes to the cold storage can grow the storage independently of the 
compute capacity in the cluster.
+
+The frameworks provided by Heterogeneous Storage and Archival Storage 
generalizes the HDFS architecture to include other kinds of storage media 
including *SSD* and *memory*. Users may choose to store their data in SSD or 
memory for a better performance.
+
+Storage Types and Storage Policies
+--
+
+### Storage Types: ARCHIVE, DISK, SSD and RAM\_DISK
+
+The first phase of [Heterogeneous Storage 
(HDFS-2832)](https://issues.apache.org/jira/browse/HDFS-2832) changed the 
datanode storage model from a single storage, which may correspond to multiple 
physical storage media, to a collection of storages, with each storage 
corresponding to a physical storage medium. It also added the notion of storage 
types, DISK and SSD, where DISK is the default storage type.
+
+A new storage type *ARCHIVE*, which has high storage density (petabyte of 
storage) but little compute power, is added for supporting archival storage.
+
+Another new storage type *RAM\_DISK* is added for supporting writing single 
replica files in memory.
+
+### Storage Policies: Hot, Warm, Cold, All\_SSD, One\_SSD and Lazy\_Persist
+
+A new concept of storage policies is introduced in order to allow files to be 
stored in different storage types according to a specified storage policy.
+
+We have the following storage policies:
+
+* **Hot** - for both storage and compute. The data that is popular and still 
being used for processing will stay in this policy. When a block is hot, all 
replicas are stored in DISK.
+* **Cold** - only for storage with limited compute. The data that is no longer 
being used, or data that needs to be archived, is moved from hot storage to 
cold storage. When a block is cold, all replicas are stored in ARCHIVE.
+* **Warm** - partially hot and partially cold. When a block is warm, some of 
its replicas are stored in DISK and the remaining replicas are stored in 
ARCHIVE.
+* **All\_SSD** - for storing all replicas in SSD.
+* **One\_SSD** - for storing one of the replicas in SSD. The remaining 
replicas are stored in DISK.
+* **Lazy\_Persist** - for writing blocks with single replica in memory. The 
replica is first written in RAM\_DISK and then it is lazily persisted in DISK.
+
+More formally, a storage policy consists of the following fields:
+
+1.  Policy ID
+2.  Policy name
+3.  A list of storage types for block placement
+4.  A list of fallback storage types for file creation
+5.  A list of fallback storage types for replication
+
+When there is enough space, block replicas 
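To make the storage-policy mechanics above concrete, here is a minimal sketch
of the admin workflow using the standard `hdfs storagepolicies` and
`hdfs mover` tools; the directory /data/archive is a hypothetical example, not
something the document defines.

    # List the storage policies supported by the cluster
    hdfs storagepolicies -listPolicies

    # Pin a directory (hypothetical path) to the Cold policy, then verify it
    hdfs storagepolicies -setStoragePolicy -path /data/archive -policy COLD
    hdfs storagepolicies -getStoragePolicy -path /data/archive

    # Setting a policy only governs future block placement; run the mover
    # so replicas that already exist migrate to match the new policy
    hdfs mover -p /data/archive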

[24/50] [abbrv] hadoop git commit: HDFS-7668. Convert site documentation from apt to markdown (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a45ef2b6/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/TransparentEncryption.apt.vm
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/TransparentEncryption.apt.vm 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/TransparentEncryption.apt.vm
deleted file mode 100644
index fe722fb..000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/TransparentEncryption.apt.vm
+++ /dev/null
@@ -1,290 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  Hadoop Distributed File System-${project.version} - Transparent Encryption 
in HDFS
-  ---
-  ---
-  ${maven.build.timestamp}
-
-Transparent Encryption in HDFS
-
-%{toc|section=1|fromDepth=2|toDepth=3}
-
-* {Overview}
-
-  HDFS implements transparent, end-to-end encryption.
-  Once configured, data read from and written to special HDFS directories is 
transparently encrypted and decrypted without requiring changes to user 
application code.
-  This encryption is also end-to-end, which means the data can only be 
encrypted and decrypted by the client.
-  HDFS never stores or has access to unencrypted data or unencrypted data 
encryption keys.
-  This satisfies two typical requirements for encryption: at-rest encryption 
(meaning data on persistent media, such as a disk) as well as in-transit 
encryption (e.g. when data is travelling over the network).
-
-* {Background}
-
-  Encryption can be done at different layers in a traditional data management 
software/hardware stack.
-  Choosing to encrypt at a given layer comes with different advantages and 
disadvantages.
-
-* Application-level encryption. This is the most secure and most 
flexible approach. The application has ultimate control over what is encrypted 
and can precisely reflect the requirements of the user. However, writing 
applications to do this is hard. This is also not an option for customers of 
existing applications that do not support encryption.
-
-* Database-level encryption. Similar to application-level encryption 
in terms of its properties. Most database vendors offer some form of 
encryption. However, there can be performance issues. One example is that 
indexes cannot be encrypted.
-
-* Filesystem-level encryption. This option offers high performance, 
application transparency, and is typically easy to deploy. However, it is 
unable to model some application-level policies. For instance, multi-tenant 
applications might want to encrypt based on the end user. A database might want 
different encryption settings for each column stored within a single file.
-
-* Disk-level encryption. Easy to deploy and high performance, but also 
quite inflexible. Only really protects against physical theft.
-
-  HDFS-level encryption fits between database-level and filesystem-level 
encryption in this stack. This has a lot of positive effects. HDFS encryption 
is able to provide good performance and existing Hadoop applications are able 
to run transparently on encrypted data. HDFS also has more context than 
traditional filesystems when it comes to making policy decisions.
-
-  HDFS-level encryption also prevents attacks at the filesystem-level and 
below (so-called OS-level attacks). The operating system and disk only 
interact with encrypted bytes, since the data is already encrypted by HDFS.
-
-* {Use Cases}
-
-  Data encryption is required by a number of different government, financial, 
and regulatory entities.
-  For example, the health-care industry has HIPAA regulations, the card 
payment industry has PCI DSS regulations, and the US government has FISMA 
regulations.
-  Having transparent encryption built into HDFS makes it easier for 
organizations to comply with these regulations.
-
-  Encryption can also be performed at the application-level, but by 
integrating it into HDFS, existing applications can operate on encrypted data 
without changes.
-  This integrated architecture implies stronger encrypted file semantics and 
better coordination with other HDFS functions.
-
-* {Architecture}
-
-** {Overview}
-
-  For transparent encryption, we introduce a new abstraction to HDFS: the 
encryption zone.
-  An encryption zone is a special directory whose contents will be 
transparently encrypted upon write and transparently decrypted upon read.
-  Each encryption zone is associated with a single encryption zone key 
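As a minimal sketch of how an encryption zone is used in practice, assuming a
KMS is configured as the key provider (the key name and path below are
hypothetical):

    # Create an encryption key in the configured key provider (KMS)
    hadoop key create mykey

    # Turn an empty directory into an encryption zone protected by that key
    hadoop fs -mkdir /secure
    hdfs crypto -createZone -keyName mykey -path /secure

    # List the existing encryption zones (superuser only)
    hdfs crypto -listZones

Files written under /secure are then encrypted on write and decrypted on read
by the client, with no application changes, as described above.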

[26/50] [abbrv] hadoop git commit: HDFS-7668. Convert site documentation from apt to markdown (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a45ef2b6/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
deleted file mode 100644
index a3a9f12..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
+++ /dev/null
@@ -1,816 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  Hadoop Distributed File System-${project.version} - High Availability
-  ---
-  ---
-  ${maven.build.timestamp}
-
-HDFS High Availability Using the Quorum Journal Manager
-
-%{toc|section=1|fromDepth=0}
-
-* {Purpose}
-
-  This guide provides an overview of the HDFS High Availability (HA) feature
-  and how to configure and manage an HA HDFS cluster, using the Quorum Journal
-  Manager (QJM) feature.
-
-  This document assumes that the reader has a general understanding of
-  the components and node types in an HDFS cluster. Please refer to the
-  HDFS Architecture guide for details.
-
-* {Note: Using the Quorum Journal Manager or Conventional Shared Storage}
-
-  This guide discusses how to configure and use HDFS HA using the Quorum
-  Journal Manager (QJM) to share edit logs between the Active and Standby
-  NameNodes. For information on how to configure HDFS HA using NFS for shared
-  storage instead of the QJM, please see
-  {{{./HDFSHighAvailabilityWithNFS.html}this alternative guide.}}
-
-* {Background}
-
-  Prior to Hadoop 2.0.0, the NameNode was a single point of failure (SPOF) in
-  an HDFS cluster. Each cluster had a single NameNode, and if that machine or
-  process became unavailable, the cluster as a whole would be unavailable
-  until the NameNode was either restarted or brought up on a separate machine.
-
-  This impacted the total availability of the HDFS cluster in two major ways:
-
-* In the case of an unplanned event such as a machine crash, the cluster 
would
-  be unavailable until an operator restarted the NameNode.
-
-* Planned maintenance events such as software or hardware upgrades on the
-  NameNode machine would result in windows of cluster downtime.
-
-  The HDFS High Availability feature addresses the above problems by providing
-  the option of running two redundant NameNodes in the same cluster in an
-  Active/Passive configuration with a hot standby. This allows a fast failover 
to
-  a new NameNode in the case that a machine crashes, or a graceful
-  administrator-initiated failover for the purpose of planned maintenance.
-
-* {Architecture}
-
-  In a typical HA cluster, two separate machines are configured as NameNodes.
-  At any point in time, exactly one of the NameNodes is in an Active state,
-  and the other is in a Standby state. The Active NameNode is responsible
-  for all client operations in the cluster, while the Standby is simply acting
-  as a slave, maintaining enough state to provide a fast failover if
-  necessary.
-
-  In order for the Standby node to keep its state synchronized with the Active
-  node, both nodes communicate with a group of separate daemons called
-  JournalNodes (JNs). When any namespace modification is performed by the
-  Active node, it durably logs a record of the modification to a majority of
-  these JNs. The Standby node is capable of reading the edits from the JNs, and
-  is constantly watching them for changes to the edit log. As the Standby Node
-  sees the edits, it applies them to its own namespace. In the event of a
-  failover, the Standby will ensure that it has read all of the edits from the
-  JournalNodes before promoting itself to the Active state. This ensures that 
the
-  namespace state is fully synchronized before a failover occurs.
-
-  In order to provide a fast failover, it is also necessary that the Standby 
node
-  have up-to-date information regarding the location of blocks in the cluster.
-  In order to achieve this, the DataNodes are configured with the location of
-  both NameNodes, and send block location information and heartbeats to both.
-
-  It is vital for the correct operation of an HA cluster that only one of the
-  NameNodes be Active at a time. Otherwise, the namespace state would quickly
-  diverge between the two, risking data loss or other 
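To ground the architecture description, a brief sketch of the administrator's
view of such a cluster, assuming the two NameNodes are registered under the
hypothetical IDs nn1 and nn2:

    # Ask each NameNode whether it is currently Active or Standby
    hdfs haadmin -getServiceState nn1
    hdfs haadmin -getServiceState nn2

    # Initiate a graceful, administrator-driven failover from nn1 to nn2;
    # the configured fencing methods still apply during the transition
    hdfs haadmin -failover nn1 nn2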

[16/50] [abbrv] hadoop git commit: HADOOP-11565. Add --slaves shell option (aw)

2015-02-23 Thread zhz
HADOOP-11565. Add --slaves shell option (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/812817c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/812817c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/812817c0

Branch: refs/heads/HDFS-EC
Commit: 812817c0ba56b1b1aae5fe81d45915bc2ddaf1be
Parents: 82c93b0
Author: Allen Wittenauer a...@apache.org
Authored: Thu Feb 12 18:01:28 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:48 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  2 ++
 .../hadoop-common/src/main/bin/hadoop|  5 +
 .../hadoop-common/src/main/bin/hadoop-config.sh  |  9 +++--
 .../hadoop-common/src/main/bin/hadoop-daemons.sh | 16 ++--
 .../src/main/bin/hadoop-functions.sh | 14 ++
 .../hadoop-hdfs/src/main/bin/hdfs|  5 +
 hadoop-mapreduce-project/bin/mapred  |  5 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn |  5 +
 .../hadoop-yarn/bin/yarn-daemons.sh  | 19 +--
 9 files changed, 74 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/812817c0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c11e340..0d8c02f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -29,6 +29,8 @@ Trunk (Unreleased)
 
 HADOOP-11554. Expose HadoopKerberosName as a hadoop subcommand (aw)
 
+HADOOP-11565. Add --slaves shell option (aw)
+
   IMPROVEMENTS
 
 HADOOP-8017. Configure hadoop-main pom to get rid of M2E plugin execution

http://git-wip-us.apache.org/repos/asf/hadoop/blob/812817c0/hadoop-common-project/hadoop-common/src/main/bin/hadoop
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index 42e4e83..6003927 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -179,6 +179,11 @@ esac
 
 hadoop_verify_user ${COMMAND}
 
+if [[ ${HADOOP_SLAVE_MODE} = true ]]; then
+  hadoop_common_slave_mode_execute "${HADOOP_HDFS_HOME}/bin/hdfs" "${HADOOP_USER_PARAMS[@]}"
+  exit $?
+fi

 # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
 hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
 HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/812817c0/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
index f0cabbe..58b871e 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
@@ -1,4 +1,4 @@
-
+#!/usr/bin/env bash
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -91,7 +91,7 @@ hadoop_bootstrap
 
 # save these off in case our caller needs them
 # shellcheck disable=SC2034
-HADOOP_USER_PARAMS="$@"
+HADOOP_USER_PARAMS=("$@")
 
 HADOOP_DAEMON_MODE=default
 
@@ -152,6 +152,11 @@ while [[ -z ${_hadoop_common_done} ]]; do
   HADOOP_LOGLEVEL=$1
   shift
 ;;
+--slaves)
+  shift
+  # shellcheck disable=SC2034
+  HADOOP_SLAVE_MODE=true
+;;
 *)
   _hadoop_common_done=true
 ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/812817c0/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
index 20d7e4a..9e4e6b0 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
@@ -56,5 +56,17 @@ else
   hdfsscript=${HADOOP_HDFS_HOME}/bin/hdfs
 fi
 
-hadoop_connect_to_hosts "$hdfsscript" \
---config "${HADOOP_CONF_DIR}" --daemon "${daemonmode}" "$@"
+hadoop_error "WARNING: Use of this script to ${daemonmode} HDFS daemons is deprecated."
+hadoop_error "WARNING: Attempting to execute replacement \"hdfs --slaves --daemon ${daemonmode}\" instead."
+
+#
+# Original input was usually:
+#  hadoop-daemons.sh (shell 
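Going by the replacement command named in the deprecation warning above, the
new slaves-mode invocation would look roughly like this; the daemon name is
illustrative:

    # Deprecated form, which this script now rewrites with a warning
    hadoop-daemons.sh start datanode

    # Replacement: run the hdfs entry point in slaves mode, which fans the
    # daemon command out to every host listed in the slaves file
    hdfs --slaves --daemon start datanode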

[30/50] [abbrv] hadoop git commit: HDFS-7791. dfs count -v should be added to quota documentation (Akira AJISAKA via aw)

2015-02-23 Thread zhz
HDFS-7791. dfs count -v should be added to quota documentation (Akira AJISAKA 
via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa93278c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa93278c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa93278c

Branch: refs/heads/HDFS-EC
Commit: fa93278c54e9ad899e91aff4389d1de91f72026a
Parents: a4f8c97
Author: Allen Wittenauer a...@apache.org
Authored: Fri Feb 13 10:56:30 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:49 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md| 5 +++--
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa93278c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bf4c9de..6463010 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -307,6 +307,9 @@ Trunk (Unreleased)
 HDFS-7711. Fix various typos in ClusterSetup.md.
 (Brahma Reddy Battula via wheat9)
 
+HDFS-7791. dfs count -v should be added to quota documentation (Akira
+AJISAKA via aw)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa93278c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md
index 380604b..a1bcd78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsQuotaAdminGuide.md
@@ -82,11 +82,12 @@ Reporting Command
 
 An extension to the count command of the HDFS shell reports quota values 
and the current count of names and bytes in use.
 
-*   `hadoop fs -count -q <directory>...<directory>`
+*   `hadoop fs -count -q [-h] [-v] <directory>...<directory>`
 
 With the -q option, also report the name quota value set for each
 directory, the available name quota remaining, the space quota
 value set, and the available space quota remaining. If the
 directory does not have a quota set, the reported values are `none`
-and `inf`.
+and `inf`. The -h option shows sizes in human readable format.
+The -v option displays a header line.
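For example, combining the flags documented above (the path is hypothetical):

    # Quota report with human-readable sizes and a header line
    hadoop fs -count -q -h -v /user/alice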
 



[10/50] [abbrv] hadoop git commit: HDFS-7711. Move the entry in CHANGE.txt to the right place.

2015-02-23 Thread zhz
HDFS-7711. Move the entry in CHANGE.txt to the right place.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90741b19
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90741b19
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90741b19

Branch: refs/heads/HDFS-EC
Commit: 90741b19c5ee798eaed0ffcef5f5f35c83a43d40
Parents: c2fe828
Author: Haohui Mai whe...@apache.org
Authored: Thu Feb 12 15:44:05 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:47 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 ---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90741b19/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e891755..7078d42 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -923,9 +923,6 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11587. TestMapFile#testMainMethodMapFile creates test files in
 hadoop-common project root. (Xiaoyu Yao via wheat9)
 
-HDFS-7711. Fix various typos in ClusterSetup.md.
-(Brahma Reddy Battula via wheat9)
-
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90741b19/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6e54428..6c1885e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -301,6 +301,9 @@ Trunk (Unreleased)
 HDFS-7755. httpfs shell code has hardcoded path to bash (Dmitry
 Sivachenko via aw)
 
+HDFS-7711. Fix various typos in ClusterSetup.md.
+(Brahma Reddy Battula via wheat9)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[34/50] [abbrv] hadoop git commit: HDFS-7686. Re-add rapid rescan of possibly corrupt block feature to the block scanner (cmccabe)

2015-02-23 Thread zhz
HDFS-7686. Re-add rapid rescan of possibly corrupt block feature to the block 
scanner (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/904786f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/904786f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/904786f3

Branch: refs/heads/HDFS-EC
Commit: 904786f3b44a27bebb52d432a57e34e033d425a0
Parents: 7ae0d42
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Fri Feb 13 14:35:49 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:49 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/datanode/BlockScanner.java  |  32 +
 .../hdfs/server/datanode/BlockSender.java   |   3 +
 .../hdfs/server/datanode/VolumeScanner.java | 133 ++-
 .../hdfs/server/datanode/TestBlockScanner.java  | 131 ++
 5 files changed, 268 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/904786f3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7705f87..747f54a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -959,6 +959,9 @@ Release 2.7.0 - UNRELEASED
   HDFS-7776. Adding additional unit tests for Quota By Storage Type.
   (Xiaoyu Yao via Arpit Agarwal)
 
+  HDFS-7686. Re-add rapid rescan of possibly corrupt block feature to the
+  block scanner (cmccabe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/904786f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 7429fff..b0248c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -29,6 +29,7 @@ import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.VolumeScanner.ScanResultHandler;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.Uninterruptibles;
@@ -278,6 +279,37 @@ public class BlockScanner {
 }
   }
 
+  /**
+   * Mark a block as suspect.
+   *
+   * This means that we should try to rescan it soon.  Note that the
+   * VolumeScanner keeps a list of recently suspicious blocks, which
+   * it uses to avoid rescanning the same block over and over in a short
+   * time frame.
+   *
+   * @param storageId The ID of the storage where the block replica
+   *  is being stored.
+   * @param block The block's ID and block pool id.
+   */
+  synchronized void markSuspectBlock(String storageId, ExtendedBlock block) {
+if (!isEnabled()) {
+      LOG.info("Not scanning suspicious block {} on {}, because the block " +
+          "scanner is disabled.", block, storageId);
+  return;
+}
+VolumeScanner scanner = scanners.get(storageId);
+if (scanner == null) {
+  // This could happen if the volume is in the process of being removed.
+  // The removal process shuts down the VolumeScanner, but the volume
+  // object stays around as long as there are references to it (which
+  // should not be that long.)
+    LOG.info("Not scanning suspicious block {} on {}, because there is no " +
+        "volume scanner for that storageId.", block, storageId);
+  return;
+}
+scanner.markSuspectBlock(block);
+  }
+
   @InterfaceAudience.Private
   public static class Servlet extends HttpServlet {
 private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/904786f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index c016e62..f4cde11 100644
--- 

[50/50] [abbrv] hadoop git commit: HADOOP-11589. NetUtils.createSocketAddr should trim the input URI. Contributed by Rakesh R.

2015-02-23 Thread zhz
HADOOP-11589. NetUtils.createSocketAddr should trim the input URI. Contributed 
by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c26687ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c26687ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c26687ec

Branch: refs/heads/HDFS-EC
Commit: c26687ec0df6710f64993bf1804420ee3a8c2728
Parents: 86a63b4
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Sun Feb 15 00:30:46 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:51 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../src/main/java/org/apache/hadoop/net/NetUtils.java|  1 +
 .../test/java/org/apache/hadoop/net/TestNetUtils.java| 11 +++
 3 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c26687ec/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 99320cb..522ec47 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -602,6 +602,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-9869. Configuration.getSocketAddr()/getEnum() should use
 getTrimmed(). (Tsuyoshi Ozawa via aajisaka)
 
+HADOOP-11589. NetUtils.createSocketAddr should trim the input URI.
+(Rakesh R via ozawa)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c26687ec/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index ef1092b..e475149 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -189,6 +189,7 @@ public class NetUtils {
       throw new IllegalArgumentException("Target address cannot be null." +
           helpText);
     }
+    target = target.trim();
     boolean hasScheme = target.contains("://");
 URI uri = null;
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c26687ec/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index 319e8a9..a12054b 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -643,6 +643,17 @@ public class TestNetUtils {
    assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1:1"));
   }
 
+  @Test
+  public void testTrimCreateSocketAddress() {
+Configuration conf = new Configuration();
+    NetUtils.addStaticResolution("host", "127.0.0.1");
+    final String defaultAddr = "host:1  ";
+
+    InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);
+    conf.setSocketAddr("myAddress", addr);
+assertEquals(defaultAddr.trim(), NetUtils.getHostPortString(addr));
+  }
+
   private <T> void assertBetterArrayEquals(T[] expect, T[] got) {
     String expectStr = StringUtils.join(expect, ", ");
     String gotStr = StringUtils.join(got, ", ");



[32/50] [abbrv] hadoop git commit: MAPREDUCE-6255. Fix JobCounter's format to use grouping separator. Contributed by Ryu Kobayashi.

2015-02-23 Thread zhz
MAPREDUCE-6255. Fix JobCounter's format to use grouping separator. Contributed 
by Ryu Kobayashi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4f8c978
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4f8c978
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4f8c978

Branch: refs/heads/HDFS-EC
Commit: a4f8c978d4fb4e7ff67c4932a63c50ac88b7af74
Parents: ac5497c
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Fri Feb 13 16:09:54 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:49 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java   | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f8c978/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 657b441..38982e4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -288,6 +288,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-4431. mapred command should print the reason on killing already 
 completed jobs. (devaraj via ozawa)
 
+MAPREDUCE-6255. Fix JobCounter's format to use grouping separator.
+(Ryu Kobayashi via ozawa)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f8c978/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
index 1b20c5d..568658e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
@@ -139,10 +139,10 @@ public class CountersBlock extends HtmlBlock {
   Counter mc = mg == null ? null : mg.findCounter(counter.getName());
   Counter rc = rg == null ? null : rg.findCounter(counter.getName());
   groupRow.
-        td(mc == null ? "0" : String.valueOf(mc.getValue())).
-        td(rc == null ? "0" : String.valueOf(rc.getValue()));
+        td(mc == null ? "0" : String.format("%,d", mc.getValue())).
+        td(rc == null ? "0" : String.format("%,d", rc.getValue()));
     }
-    groupRow.td(String.valueOf(counter.getValue()))._();
+    groupRow.td(String.format("%,d", counter.getValue()))._();
   }
   group._()._()._()._();
 }



[15/50] [abbrv] hadoop git commit: HDFS-7684. The host:port settings of the daemons should be trimmed before use. Contributed by Anu Engineer.

2015-02-23 Thread zhz
HDFS-7684. The host:port settings of the daemons should be trimmed before use. 
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/693e43c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/693e43c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/693e43c4

Branch: refs/heads/HDFS-EC
Commit: 693e43c4dcdbc56efa3c737a0e4941f01ed4ad23
Parents: 2efa407
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu Feb 12 17:38:37 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:48 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/blockmanagement/DatanodeManager.java |   8 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   6 +-
 .../server/datanode/web/DatanodeHttpServer.java |   2 +-
 .../hadoop/hdfs/server/namenode/BackupNode.java |   6 +-
 .../hdfs/server/namenode/ImageServlet.java  |   2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |   4 +-
 .../server/namenode/NameNodeHttpServer.java |   4 +-
 .../hdfs/server/namenode/SecondaryNameNode.java |   4 +-
 .../hdfs/server/namenode/TestMalformedURLs.java |  59 
 .../src/test/resources/hdfs-site.malformed.xml  | 143 +++
 11 files changed, 223 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6c1885e..09ae2e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -628,6 +628,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7694. FSDataInputStream should support unbuffer (cmccabe)
 
+HDFS-7684. The host:port settings of the daemons should be trimmed before
+use. (Anu Engineer via aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 15e7010..f5fe161 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -187,16 +187,16 @@ public class DatanodeManager {
 this.fsClusterStats = newFSClusterStats();
 
 this.defaultXferPort = NetUtils.createSocketAddr(
-  conf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
+  conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
   DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort();
 this.defaultInfoPort = NetUtils.createSocketAddr(
-  conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
+  conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
   DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT)).getPort();
 this.defaultInfoSecurePort = NetUtils.createSocketAddr(
-conf.get(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
+conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
 DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();
 this.defaultIpcPort = NetUtils.createSocketAddr(
-  conf.get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
+  conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
   DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
 try {
   this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 8d3b3a2..4428408 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -726,7 +726,7 @@ public class DataNode extends ReconfigurableBase
 
   private 

[29/50] [abbrv] hadoop git commit: YARN-3164. RMAdmin command usage prints incorrect command name. Contributed by Bibin A Chundatt

2015-02-23 Thread zhz
YARN-3164. RMAdmin command usage prints incorrect command name.
Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac5497c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac5497c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac5497c4

Branch: refs/heads/HDFS-EC
Commit: ac5497c48b33b2cf89720070ef94a51aac7357bd
Parents: 7367700
Author: Xuan xg...@apache.org
Authored: Thu Feb 12 21:39:16 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:49 2015 -0800

--
 .../main/java/org/apache/hadoop/ha/HAAdmin.java   |  4 ++--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../apache/hadoop/yarn/client/cli/RMAdminCLI.java | 10 ++
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java| 18 +-
 4 files changed, 32 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac5497c4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index f72df77..9c28eb9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -125,12 +125,12 @@ public abstract class HAAdmin extends Configured 
implements Tool {
 ToolRunner.printGenericCommandUsage(errOut);
   }
   
-  private static void printUsage(PrintStream errOut, String cmd) {
+  private void printUsage(PrintStream errOut, String cmd) {
 UsageInfo usage = USAGE.get(cmd);
 if (usage == null) {
       throw new RuntimeException("No usage for cmd " + cmd);
     }
-    errOut.println("Usage: HAAdmin [" + cmd + " " + usage.args + "]");
+    errOut.println(getUsageString() + " [" + cmd + " " + usage.args + "]");
   }
 
   private int transitionToActive(final CommandLine cmd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac5497c4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0e8a8e4..cd9b467 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -590,6 +590,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3191. Log object should be initialized with its own class. (Rohith via
 aajisaka)
 
+YARN-3164. RMAdmin command usage prints incorrect command name. 
+(Bibin A Chundatt via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac5497c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index d29f674..4642add 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.client.cli;
 
 import java.io.IOException;
+import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -127,6 +128,10 @@ public class RMAdminCLI extends HAAdmin {
 super(conf);
   }
 
+  protected void setErrOut(PrintStream errOut) {
+this.errOut = errOut;
+  }
+
   private static void appendHAUsage(final StringBuilder usageBuilder) {
 for (Map.EntryString,UsageInfo cmdEntry : USAGE.entrySet()) {
   if (cmdEntry.getKey().equals(-help)) {
@@ -639,6 +644,11 @@ public class RMAdminCLI extends HAAdmin {
          "Could not connect to RM HA Admin for node " + rmId);
 }
   }
+  
+  @Override
+  protected String getUsageString() {
+    return "Usage: rmadmin";
+  }
 
   public static void main(String[] args) throws Exception {
 int result = ToolRunner.run(new RMAdminCLI(), args);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac5497c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
--
diff --git 

[12/50] [abbrv] hadoop git commit: YARN-3182. Cleanup switch statement in ApplicationMasterLauncher#handle(). Contributed by Ray Chiang.

2015-02-23 Thread zhz
YARN-3182. Cleanup switch statement in ApplicationMasterLauncher#handle(). 
Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73677007
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73677007
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73677007

Branch: refs/heads/HDFS-EC
Commit: 736770071c3adbc9a6985bab28492b6bc3a1c8db
Parents: eb58025
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Fri Feb 13 14:21:21 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:48 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../resourcemanager/amlauncher/ApplicationMasterLauncher.java | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73677007/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 622072f..0e8a8e4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -292,6 +292,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3179. Update use of Iterator to Iterable in RMAdminCLI and
 CommonNodeLabelsManager. (Ray Chiang via xgong)
 
+YARN-3182. Cleanup switch statement in ApplicationMasterLauncher#handle().
+(Ray Chiang via ozawa)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73677007/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
index af02b19..5fc39fd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
@@ -117,6 +117,7 @@ public class ApplicationMasterLauncher extends 
AbstractService implements
   break;
 case CLEANUP:
   cleanup(application);
+  break;
 default:
   break;
 }



[09/50] [abbrv] hadoop git commit: HADOOP-11586. Update use of Iterator to Iterable in AbstractMetricsContext.java. Contributed by Ray Chiang.

2015-02-23 Thread zhz
HADOOP-11586. Update use of Iterator to Iterable in 
AbstractMetricsContext.java. Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27447c57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27447c57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27447c57

Branch: refs/heads/HDFS-EC
Commit: 27447c57717ccaf991933c56dee566e71f86242e
Parents: f607b72
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu Feb 12 14:41:03 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:47 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../hadoop/metrics/spi/AbstractMetricsContext.java  | 12 ++--
 2 files changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27447c57/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0a274d3..7078d42 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -594,6 +594,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11543. Improve help message for hadoop/yarn command. (Brahma 
 Reddy Battula via ozawa).
 
+HADOOP-11586. Update use of Iterator to Iterable in
+AbstractMetricsContext.java. (Ray Chiang via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27447c57/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
index ae45585..067dc35 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
@@ -318,13 +318,13 @@ public abstract class AbstractMetricsContext implements 
MetricsContext {
*  Emits the records.
*/
   private synchronized void emitRecords() throws IOException {
-    for (String recordName : bufferedData.keySet()) {
-      RecordMap recordMap = bufferedData.get(recordName);
+    for (Map.Entry<String, RecordMap> recordEntry : bufferedData.entrySet()) {
+      RecordMap recordMap = recordEntry.getValue();
       synchronized (recordMap) {
         Set<Entry<TagMap, MetricMap>> entrySet = recordMap.entrySet();
         for (Entry<TagMap, MetricMap> entry : entrySet) {
           OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue());
-          emitRecord(contextName, recordName, outRec);
+          emitRecord(contextName, recordEntry.getKey(), outRec);
 }
   }
 }
@@ -339,8 +339,8 @@ public abstract class AbstractMetricsContext implements 
MetricsContext {
   @Override
   public synchronized Map<String, Collection<OutputRecord>> getAllRecords() {
     Map<String, Collection<OutputRecord>> out =
         new TreeMap<String, Collection<OutputRecord>>();
-    for (String recordName : bufferedData.keySet()) {
-      RecordMap recordMap = bufferedData.get(recordName);
+    for (Map.Entry<String, RecordMap> recordEntry : bufferedData.entrySet()) {
+      RecordMap recordMap = recordEntry.getValue();
       synchronized (recordMap) {
         List<OutputRecord> records = new ArrayList<OutputRecord>();
         Set<Entry<TagMap, MetricMap>> entrySet = recordMap.entrySet();
@@ -348,7 +348,7 @@ public abstract class AbstractMetricsContext implements 
MetricsContext {
   OutputRecord outRec = new OutputRecord(entry.getKey(), 
entry.getValue());
   records.add(outRec);
 }
-out.put(recordName, records);
+out.put(recordEntry.getKey(), records);
   }
 }
 return out;



[01/50] [abbrv] hadoop git commit: YARN-3181. FairScheduler: Fix up outdated findbugs issues. (kasha)

2015-02-23 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-EC 41921ce96 -> 49204fcf1 (forced update)


YARN-3181. FairScheduler: Fix up outdated findbugs issues. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd4e5992
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd4e5992
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd4e5992

Branch: refs/heads/HDFS-EC
Commit: cd4e599297d0b116311f8241d59492d29d1de0c9
Parents: 52eb87b
Author: Karthik Kambatla ka...@apache.org
Authored: Thu Feb 12 13:44:47 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:46 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  2 ++
 .../dev-support/findbugs-exclude.xml| 27 
 .../scheduler/fair/AllocationConfiguration.java | 13 +++---
 .../fair/AllocationFileLoaderService.java   |  2 +-
 .../scheduler/fair/FSOpDurations.java   |  3 +++
 5 files changed, 16 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd4e5992/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d1b684e..1ff1fcb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -277,6 +277,8 @@ Release 2.7.0 - UNRELEASED
 YARN-2079. Recover NonAggregatingLogHandler state upon nodemanager
 restart. (Jason Lowe via junping_du) 
 
+YARN-3181. FairScheduler: Fix up outdated findbugs issues. (kasha)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd4e5992/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index e0bbd7b..09a9d2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -142,22 +142,12 @@
     <Class name="org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.LogAggregationService" />
     <Bug pattern="IS2_INCONSISTENT_SYNC" />
   </Match>
-  <Match>
-    <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationFileLoaderService" />
-    <Field name="allocFile" />
-    <Bug pattern="IS2_INCONSISTENT_SYNC" />
-  </Match>
   <!-- Inconsistent sync warning - minimumAllocation is only initialized once and never changed -->
   <Match>
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler" />
     <Field name="minimumAllocation" />
     <Bug pattern="IS2_INCONSISTENT_SYNC" />
   </Match>
-  <Match>
-    <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSSchedulerNode" />
-    <Method name="reserveResource" />
-    <Bug pattern="BC_UNCONFIRMED_CAST" />
-  </Match>
   <!-- Inconsistent sync warning - reinitialize read from other queue does not need sync-->
   <Match>
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue" />
@@ -213,18 +203,6 @@
     <Field name="scheduleAsynchronously" />
     <Bug pattern="IS2_INCONSISTENT_SYNC" />
   </Match>
-  <!-- Inconsistent sync warning - updateInterval is only initialized once and never changed -->
-  <Match>
-    <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler" />
-    <Field name="updateInterval" />
-    <Bug pattern="IS2_INCONSISTENT_SYNC" />
-  </Match>
-  <!-- Inconsistent sync warning - callDurationMetrics is only initialized once and never changed -->
-  <Match>
-    <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler" />
-    <Field name="fsOpDurations" />
-    <Bug pattern="IS2_INCONSISTENT_SYNC" />
-  </Match>
 
   <!-- Inconsistent sync warning - numRetries is only initialized once and never changed -->
   <Match>
@@ -425,11 +403,6 @@
     <Bug pattern="IS2_INCONSISTENT_SYNC" />
   </Match>
   <Match>
-    <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler" />
-    <Field name="allocConf" />
-    <Bug pattern="IS2_INCONSISTENT_SYNC" />
-  </Match>
-  <Match>
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode" />
     <Field name="numContainers" />
     <Bug pattern="VO_VOLATILE_INCREMENT" />

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd4e5992/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java

[39/50] [abbrv] hadoop git commit: HDFS-7775. Use consistent naming for NN-internal quota related types and functions. (Contributed by Xiaoyu Yao)

2015-02-23 Thread zhz
HDFS-7775. Use consistent naming for NN-internal quota related types and 
functions. (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6fc5b88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6fc5b88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6fc5b88

Branch: refs/heads/HDFS-EC
Commit: b6fc5b88d8e6538b81c117f5f5ca4009a63f
Parents: afebf70
Author: Arpit Agarwal a...@apache.org
Authored: Fri Feb 13 21:01:33 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:50 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   7 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  24 ++--
 .../hadoop/hdfs/DistributedFileSystem.java  |  10 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  18 +--
 .../hadoop/hdfs/protocol/ClientProtocol.java|   8 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |   2 +-
 .../ClientNamenodeProtocolTranslatorPB.java |   4 +-
 .../namenode/DirectoryWithQuotaFeature.java | 111 ++-
 .../hdfs/server/namenode/FSDirAttrOp.java   |  36 +++---
 .../hdfs/server/namenode/FSDirConcatOp.java |   2 +-
 .../hdfs/server/namenode/FSDirectory.java   |  39 +++
 .../hadoop/hdfs/server/namenode/FSImage.java|  16 +--
 .../hdfs/server/namenode/FSImageFormat.java |   9 +-
 .../server/namenode/FSImageFormatPBINode.java   |   6 +-
 .../server/namenode/FSImageSerialization.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   6 +-
 .../hadoop/hdfs/server/namenode/INode.java  |  18 +--
 .../hdfs/server/namenode/INodeDirectory.java|  12 +-
 .../namenode/INodeDirectoryAttributes.java  |   8 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  46 
 .../hdfs/server/namenode/INodeReference.java|   4 +-
 .../hdfs/server/namenode/INodeSymlink.java  |   2 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   4 +-
 .../hadoop/hdfs/server/namenode/Quota.java  |  10 +-
 .../hdfs/server/namenode/QuotaCounts.java   |  98 
 .../snapshot/FileWithSnapshotFeature.java   |  14 +--
 .../src/main/proto/ClientNamenodeProtocol.proto |   2 +-
 .../namenode/TestDiskspaceQuotaUpdate.java  |  14 +--
 .../server/namenode/TestQuotaByStorageType.java |   6 +-
 .../snapshot/TestRenameWithSnapshots.java   |  10 +-
 .../namenode/snapshot/TestSnapshotDeletion.java |   4 +-
 31 files changed, 285 insertions(+), 267 deletions(-)
--
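
Context for the diff below: after this rename, the client-facing quota API consistently says "storagespace" where it previously mixed in "diskspace". A minimal usage sketch of the renamed parameter (the NameNode address, path, and values are illustrative, not from the patch):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class SetQuotaSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Illustrative NameNode address.
        FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        // Limit /user/alice to 1,000,000 names and ~10 GB of storage space;
        // the third argument is the renamed "storagespace" quota.
        dfs.setQuota(new Path("/user/alice"), 1000000L, 10L * 1024 * 1024 * 1024);
      }
    }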


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc5b88/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 196b1bd..324d5ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -951,6 +951,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7778. Rename FsVolumeListTest to TestFsVolumeList and commit it to
 branch-2. (Lei (Eddy) Xu via cnauroth)
 
+HDFS-4625. BKJM doesn't take advantage of speculative reads. (Rakesh R
+via aajisaka)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
@@ -965,8 +968,8 @@ Release 2.7.0 - UNRELEASED
   HDFS-7776. Adding additional unit tests for Quota By Storage Type.
   (Xiaoyu Yao via Arpit Agarwal)
 
-  HDFS-4625. BKJM doesn't take advantage of speculative reads. (Rakesh R
-  via aajisaka)
+  HDFS-7775. Use consistent naming for NN-internal quota related types
+  and functions. (Xiaoyu Yao via Arpit Agarwal)
 
 Release 2.6.1 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc5b88/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index c7c9fd8..3fd4e12 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3049,22 +3049,22 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
* Sets or resets quotas for a directory.
* @see ClientProtocol#setQuota(String, long, long, StorageType)
*/
-  void setQuota(String src, long namespaceQuota, long diskspaceQuota) 
+  void setQuota(String src, long namespaceQuota, long storagespaceQuota)
   throws IOException {
 // sanity check
  if ((namespaceQuota <= 0 && namespaceQuota != 

[23/50] [abbrv] hadoop git commit: HDFS-7668. Convert site documentation from apt to markdown (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a45ef2b6/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
deleted file mode 100644
index 3668286..000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
+++ /dev/null
@@ -1,2628 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the License);
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an AS IS BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  Hadoop Distributed File System-${project.version} - WebHDFS REST API
-  ---
-  ---
-  ${maven.build.timestamp}
-
-WebHDFS REST API
-
-%{toc|section=1|fromDepth=0}
-
-* {Document Conventions}
-
-*--+---+
-| Monospaced | Used for commands, HTTP request and responses and 
code blocks.|
-*--+---+
-| \Monospaced\ | User entered values.  
|
-*--+---+
-| [Monospaced]   | Optional values.  When the value is not specified, 
the default value is used. |
-*--+---+
-| Italics| Important phrases and words.  
|
-*--+---+
-
-
-* {Introduction}
-
-  The HTTP REST API supports the complete
-  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}/{{{../../api/org/apache/hadoop/fs/FileContext.html}FileContext}}
-  interface for HDFS.
-  The operations and the corresponding FileSystem/FileContext methods are 
shown in the next section.
-  The Section {{HTTP Query Parameter Dictionary}} specifies the parameter 
details
-  such as the defaults and the valid values.
-
-** {Operations}
-
-  * HTTP GET
-
-* {{{Open and Read a File}OPEN}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.open)
-
-* {{{Status of a File/Directory}GETFILESTATUS}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getFileStatus)
-
-* {{{List a Directory}LISTSTATUS}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listStatus)
-
-* {{{Get Content Summary of a Directory}GETCONTENTSUMMARY}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getContentSummary)
-
-* {{{Get File Checksum}GETFILECHECKSUM}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getFileChecksum)
-
-* {{{Get Home Directory}GETHOMEDIRECTORY}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getHomeDirectory)
-
-* {{{Get Delegation Token}GETDELEGATIONTOKEN}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getDelegationToken)
-
-* {{{Get Delegation Tokens}GETDELEGATIONTOKENS}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getDelegationTokens)
-
-* {{{Get an XAttr}GETXATTRS}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttr)
-
-* {{{Get multiple XAttrs}GETXATTRS}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttrs)
-
-* {{{Get all XAttrs}GETXATTRS}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttrs)
-
-* {{{List all XAttrs}LISTXATTRS}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs)
-
-* {{{Check access}CHECKACCESS}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access)
-
-  * HTTP PUT
-
-* {{{Create and Write to a File}CREATE}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.create)
-
-* {{{Make a Directory}MKDIRS}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.mkdirs)
-
-* {{{Create a Symbolic Link}CREATESYMLINK}}
-(see  
{{{../../api/org/apache/hadoop/fs/FileContext.html}FileContext}}.createSymlink)
-
-* {{{Rename a File/Directory}RENAME}}
-(see  

[31/50] [abbrv] hadoop git commit: HADOOP-11467. KerberosAuthenticator can connect to a non-secure cluster. (yzhangal via rkanter)

2015-02-23 Thread zhz
HADOOP-11467. KerberosAuthenticator can connect to a non-secure cluster. 
(yzhangal via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ae0d424
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ae0d424
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ae0d424

Branch: refs/heads/HDFS-EC
Commit: 7ae0d424c95b58cb0101cd3a49a554aec6d5ff77
Parents: 6116467
Author: Robert Kanter rkan...@apache.org
Authored: Fri Feb 13 14:01:46 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:49 2015 -0800

--
 .../client/KerberosAuthenticator.java   |  26 ++-
 .../server/AuthenticationToken.java | 162 ++
 .../security/authentication/util/AuthToken.java | 218 +++
 .../server/TestAuthenticationToken.java | 100 -
 .../authentication/util/TestAuthToken.java  | 127 +++
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 6 files changed, 385 insertions(+), 251 deletions(-)
--
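
The fix below makes KerberosAuthenticator check that a token negotiated transparently by the JDK is actually of type kerberos or kerberos-dt before trusting it, and otherwise fall back to the configured authenticator; previously a token issued by a non-secure (pseudo-auth) cluster could be accepted as if SPNEGO had succeeded. For reference, a minimal hadoop-auth client that exercises this code path (the URL is illustrative):

    import java.net.HttpURLConnection;
    import java.net.URL;

    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
    import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;

    public class SpnegoClientSketch {
      public static void main(String[] args) throws Exception {
        // The token is populated during authentication and can be reused
        // for subsequent requests to the same endpoint.
        AuthenticatedURL.Token token = new AuthenticatedURL.Token();
        URL url = new URL("http://webserver:14000/webhdfs/v1/?op=GETHOMEDIRECTORY");
        HttpURLConnection conn =
            new AuthenticatedURL(new KerberosAuthenticator()).openConnection(url, token);
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }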


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ae0d424/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 323b019..e107810 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -14,6 +14,7 @@
 package org.apache.hadoop.security.authentication.client;
 
 import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.security.authentication.util.AuthToken;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSManager;
@@ -29,6 +30,7 @@ import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.Configuration;
 import javax.security.auth.login.LoginContext;
 import javax.security.auth.login.LoginException;
+
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
@@ -187,13 +189,18 @@ public class KerberosAuthenticator implements 
Authenticator {
   conn.setRequestMethod(AUTH_HTTP_METHOD);
   conn.connect();
   
+  boolean needFallback = false;
   if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
 LOG.debug(JDK performed authentication on our behalf.);
 // If the JDK already did the SPNEGO back-and-forth for
 // us, just pull out the token.
 AuthenticatedURL.extractToken(conn, token);
-return;
-  } else if (isNegotiate()) {
+if (isTokenKerberos(token)) {
+  return;
+}
+needFallback = true;
+  }
+  if (!needFallback && isNegotiate()) {
 LOG.debug(Performing our own SPNEGO sequence.);
 doSpnegoSequence(token);
   } else {
@@ -225,6 +232,21 @@ public class KerberosAuthenticator implements 
Authenticator {
   }
 
   /*
+   * Check if the passed token is of type kerberos or kerberos-dt
+   */
+  private boolean isTokenKerberos(AuthenticatedURL.Token token)
+  throws AuthenticationException {
+if (token.isSet()) {
+  AuthToken aToken = AuthToken.parse(token.toString());  
+  if (aToken.getType().equals("kerberos") ||
+  aToken.getType().equals("kerberos-dt")) {
+return true;
+  }
+}
+return false;
+  }
+
+  /*
   * Indicates if the response is starting a SPNEGO negotiation.
   */
   private boolean isNegotiate() throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ae0d424/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
index bb3e71d..0e2b45d 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
@@ -14,14 +14,9 @@
 package 

svn commit: r1661743 - in /hadoop/common/site/main/author/src/documentation/content/xdocs: index.xml releases.xml

2015-02-23 Thread wheat9
Author: wheat9
Date: Mon Feb 23 18:53:40 2015
New Revision: 1661743

URL: http://svn.apache.org/r1661743
Log:
Fix the XML to ensure that the site can be compiled against forrest.
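
(The underlying issue: in XHTML a nested <ul> must be a child of an <li>, so the closing </li> tags in the hunks below move to after the nested lists. Forrest 0.9 appears to validate this nesting more strictly than 0.8 did.)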


Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml

Modified: 
hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml?rev=1661743r1=1661742r2=1661743view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml 
Mon Feb 23 18:53:40 2015
@@ -145,19 +145,21 @@
 enhancements such as:
 </p>
 <ul>
-  <li>Hadoop Common</li>
+  <li>Hadoop Common
   <ul>
 <li>Key management server (beta)</li>
 <li>Credential provider (beta)</li>
   </ul>
+  </li>
-  <li>Hadoop HDFS</li>
+  <li>Hadoop HDFS
   <ul>
-<li>Heterogeneous Storage Tiers - Phase 2</li>
+<li>Heterogeneous Storage Tiers - Phase 2
   <ul>
   <li>Application APIs for heterogeneous storage</li>
   <li>SSD storage tier</li>
   <li>Memory as a storage tier (beta)</li>
   </ul>
+</li>
 <li>Support for Archival Storage</li>
 <li>Transparent data at rest encryption (beta)</li>
 <li>Operating secure DataNode without requiring root 
@@ -166,17 +168,20 @@
   without restarting data node (beta)</li>
 <li>AES support for faster wire encryption</li>
   </ul>
+  </li>
-  <li>Hadoop YARN</li>
+  <li>Hadoop YARN
   <ul>
-<li>Support for long running services in YARN</li>
+<li>Support for long running services in YARN
   <ul>
   <li>Service Registry for applications</li>
   </ul>
+</li>
-<li>Support for rolling upgrades</li>
+<li>Support for rolling upgrades
   <ul>
 <li>Work-preserving restarts of ResourceManager</li>
 <li>Container-preserving restart of NodeManager</li>
   </ul>
+</li>
 <li>Support node labels during scheduling</li>
 <li>Support for time-based resource reservations in 
   Capacity Scheduler (beta)</li>
@@ -184,6 +189,7 @@
 <li>Support running of applications natively in 
   Docker containers (alpha)</li>
   </ul>
+  </li>
 </ul>
 
 <p> 

Modified: 
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml?rev=1661743r1=1661742r2=1661743view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
Mon Feb 23 18:53:40 2015
@@ -43,16 +43,17 @@
 </p>
 
 <ul>
-  <li>Hadoop Common</li>
+  <li>Hadoop Common
   <ul>
 <li><a href="https://issues.apache.org/jira/browse/HADOOP-10433">
 HADOOP-10433</a> - Key management server (beta)</li>
 <li><a href="https://issues.apache.org/jira/browse/HADOOP-10607">
 HADOOP-10607</a> - Credential provider (beta)</li>
   </ul>
+  </li>
-  <li>Hadoop HDFS</li>
+  <li>Hadoop HDFS
   <ul>
-<li>Heterogeneous Storage Tiers - Phase 2</li>
+<li>Heterogeneous Storage Tiers - Phase 2
   <ul>
   <li><a href="https://issues.apache.org/jira/browse/HDFS-5682">
   HDFS-5682</a> - Application APIs for heterogeneous storage</li>
@@ -61,6 +62,7 @@
   <li><a href="https://issues.apache.org/jira/browse/HDFS-5851">
   HDFS-5851</a> - Memory as a storage tier (beta)</li>
   </ul>
+</li>
 <li><a href="https://issues.apache.org/jira/browse/HDFS-6584">
 HDFS-6584</a> - Support for Archival Storage</li>
 <li><a href="https://issues.apache.org/jira/browse/HDFS-6134">
@@ -74,22 +76,25 @@
 <li><a href="https://issues.apache.org/jira/browse/HDFS-6606">
 HDFS-6606</a> - AES support for faster wire encryption</li>
   </ul>
-  <li>Hadoop YARN</li>
+  <li>Hadoop YARN
   <ul>
 <li><a href="https://issues.apache.org/jira/browse/YARN-896">
-YARN-896</a> - Support for long running services in YARN</li>
+YARN-896</a> - Support for long running services in YARN
   <ul>
   <li><a 
   lia 

svn commit: r1661751 - in /hadoop/common/site/main/publish: who.html who.pdf

2015-02-23 Thread wheat9
Author: wheat9
Date: Mon Feb 23 19:08:12 2015
New Revision: 1661751

URL: http://svn.apache.org/r1661751
Log:
Add wheat9 as a Hadoop PMC member.

Modified:
hadoop/common/site/main/publish/who.html
hadoop/common/site/main/publish/who.pdf

Modified: hadoop/common/site/main/publish/who.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/who.html?rev=1661751r1=1661750r2=1661751view=diff
==
--- hadoop/common/site/main/publish/who.html (original)
+++ hadoop/common/site/main/publish/who.html Mon Feb 23 19:08:12 2015
@@ -3,7 +3,7 @@
 head
 META http-equiv=Content-Type content=text/html; charset=UTF-8
 meta content=Apache Forrest name=Generator
-meta name=Forrest-version content=0.8
+meta name=Forrest-version content=0.9
 meta name=Forrest-skin-name content=hadoop-pelt
 titleWho We Are/title
 link type=text/css href=skin/basic.css rel=stylesheet
@@ -253,7 +253,7 @@ document.write(Last Published:  + docu
 /div 
 
 
-a name=N1000C/aa name=Apache+Hadoop+Project+Members/a
+a name=N1000D/aa name=Apache+Hadoop+Project+Members/a
 h2 class=h3Apache Hadoop Project Members/h2
 div class=section
 p
@@ -271,7 +271,7 @@ document.write(Last Published:  + docu
 /div
 
 
-a name=N10016/aa name=Hadoop+PMC/a
+a name=N10017/aa name=Hadoop+PMC/a
 h2 class=h3Hadoop PMC/h2
 div class=section
 p
@@ -852,6 +852,17 @@ document.write(Last Published:  + docu

 /tr
 
+   
+tr
+ 
+td colspan=1 rowspan=1wheat9/td
+ td colspan=1 rowspan=1a href=http://haohui.me;Haohui 
Mai/a/td
+ td colspan=1 rowspan=1Hortonworks/td
+ td colspan=1 rowspan=1/td
+ td colspan=1 rowspan=1-8/td
+   
+/tr
+
 
 tr
 
@@ -879,7 +890,7 @@ document.write(Last Published:  + docu
 /div
 
 
-a name=N10613/aa name=Emeritus+Hadoop+PMC+Members/a
+a name=N10631/aa name=Emeritus+Hadoop+PMC+Members/a
 h2 class=h3Emeritus Hadoop PMC Members/h2
 div class=section
 ul
@@ -894,7 +905,7 @@ document.write(Last Published:  + docu
 /div
 

-a name=N10626/aa name=Hadoop+Committers/a
+a name=N10644/aa name=Hadoop+Committers/a
 h2 class=h3Hadoop Committers/h2
 div class=section
 pHadoop's active committers include:/p
@@ -1806,7 +1817,7 @@ document.write(Last Published:  + docu
 tr
  
 td colspan=1 rowspan=1wheat9/td
- td colspan=1 rowspan=1Haohui Mai/td
+ td colspan=1 rowspan=1a href=http://haohui.me;Haohui 
Mai/a/td
  td colspan=1 rowspan=1Hortonworks/td
  td colspan=1 rowspan=1/td
  td colspan=1 rowspan=1-8/td
@@ -1862,7 +1873,7 @@ document.write(Last Published:  + docu
 /div
 

-a name=N10FDF/aa name=Emeritus+Hadoop+Committers/a
+a name=N10FFF/aa name=Emeritus+Hadoop+Committers/a
 h2 class=h3Emeritus Hadoop Committers/h2
 div class=section
 pHadoop committers who are no longer active include:/p

Modified: hadoop/common/site/main/publish/who.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/who.pdf?rev=1661751r1=1661750r2=1661751view=diff
==
Binary files - no diff available.




[38/50] [abbrv] hadoop git commit: HDFS-7778. Rename FsVolumeListTest to TestFsVolumeList and commit it to branch-2. Contributed by Lei (Eddy) Xu.

2015-02-23 Thread zhz
HDFS-7778. Rename FsVolumeListTest to TestFsVolumeList and commit it to 
branch-2. Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afebf701
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afebf701
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afebf701

Branch: refs/heads/HDFS-EC
Commit: afebf7011c0dd5fe40f70255a530fa8a26911b27
Parents: 08bc0c0
Author: cnauroth cnaur...@apache.org
Authored: Fri Feb 13 16:30:28 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:50 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../fsdataset/impl/FsVolumeListTest.java| 101 ---
 .../fsdataset/impl/TestFsVolumeList.java| 101 +++
 3 files changed, 104 insertions(+), 101 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afebf701/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1ec2bd2..196b1bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -948,6 +948,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7686. Re-add rapid rescan of possibly corrupt block feature to the
 block scanner (cmccabe)
 
+HDFS-7778. Rename FsVolumeListTest to TestFsVolumeList and commit it to
+branch-2. (Lei (Eddy) Xu via cnauroth)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afebf701/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeListTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeListTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeListTest.java
deleted file mode 100644
index 691d390..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeListTest.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystemTestHelper;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.StorageType;
-import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
-import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.assertNotEquals;
-import static org.mockito.Mockito.mock;
-
-public class FsVolumeListTest {
-
-  private final Configuration conf = new Configuration();
-  private VolumeChoosingPolicyFsVolumeImpl blockChooser =
-  new RoundRobinVolumeChoosingPolicy();
-  private FsDatasetImpl dataset = null;
-  private String baseDir;
-  private BlockScanner blockScanner;
-
-  @Before
-  public void setUp() {
-dataset = mock(FsDatasetImpl.class);
-baseDir = new FileSystemTestHelper().getTestRootDir();
-Configuration blockScannerConf = new Configuration();
-blockScannerConf.setInt(DFSConfigKeys.
-DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
-blockScanner = new BlockScanner(null, blockScannerConf);
-  }
-
-  @Test
-  public void testGetNextVolumeWithClosedVolume() throws IOException {
-FsVolumeList volumeList = new 

[43/50] [abbrv] hadoop git commit: MAPREDUCE-6256. Removed unused private methods in o.a.h.mapreduce.Job.java. Contributed by Naganarasimha G R.

2015-02-23 Thread zhz
MAPREDUCE-6256. Removed unused private methods in o.a.h.mapreduce.Job.java. 
Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb736e91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb736e91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb736e91

Branch: refs/heads/HDFS-EC
Commit: bb736e91e35a951ec684e3345a6670a74e485907
Parents: c26687e
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Sun Feb 15 10:09:42 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:51 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../java/org/apache/hadoop/mapreduce/Job.java   | 47 
 2 files changed, 3 insertions(+), 47 deletions(-)
--
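
Among the removals below, shouldDownloadProfile() scanned the job's profiling arguments for an hprof agent with file-based output. For reference, profile params of the following shape — what I understand to be the long-standing framework default for `mapreduce.task.profile.params` — are what it used to match (snippet illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class ProfileParamsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // hprof-style agent arguments with file output, as the removed
        // helper expected to find them ("file=%s" is filled in per task).
        conf.set("mapreduce.task.profile.params",
            "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,"
                + "verbose=n,file=%s");
      }
    }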


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb736e91/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 38982e4..bb9e105 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -291,6 +291,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6255. Fix JobCounter's format to use grouping separator.
 (Ryu Kobayashi via ozawa)
 
+MAPREDUCE-6256. Removed unused private methods in o.a.h.mapreduce.Job.java.
+(Naganarasimha G R via ozawa)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb736e91/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index 493ca5f..470290c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -98,9 +98,6 @@ public class Job extends JobContextImpl implements JobContext 
{
 mapreduce.client.genericoptionsparser.used;
   public static final String SUBMIT_REPLICATION = 
 mapreduce.client.submit.file.replication;
-  private static final String TASKLOG_PULL_TIMEOUT_KEY =
-   mapreduce.client.tasklog.timeout;
-  private static final int DEFAULT_TASKLOG_TIMEOUT = 60000;
 
   @InterfaceStability.Evolving
   public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
@@ -340,10 +337,6 @@ public class Job extends JobContextImpl implements 
JobContext {
 updateStatus();
 return status;
   }
-  
-  private void setStatus(JobStatus status) {
-this.status = status;
-  }
 
   /**
* Returns the current state of the Job.
@@ -1393,46 +1386,6 @@ public class Job extends JobContextImpl implements 
JobContext {
 return success;
   }
 
-  /**
-   * @return true if the profile parameters indicate that this is using
-   * hprof, which generates profile files in a particular location
-   * that we can retrieve to the client.
-   */
-  private boolean shouldDownloadProfile() {
-// Check the argument string that was used to initialize profiling.
-// If this indicates hprof and file-based output, then we're ok to
-// download.
-String profileParams = getProfileParams();
-
-if (null == profileParams) {
-  return false;
-}
-
-// Split this on whitespace.
-String [] parts = profileParams.split([ \\t]+);
-
-// If any of these indicate hprof, and the use of output files, return 
true.
-boolean hprofFound = false;
-boolean fileFound = false;
-for (String p : parts) {
-  if (p.startsWith(-agentlib:hprof) || p.startsWith(-Xrunhprof)) {
-hprofFound = true;
-
-// This contains a number of comma-delimited components, one of which
-// may specify the file to write to. Make sure this is present and
-// not empty.
-String [] subparts = p.split(,);
-for (String sub : subparts) {
-  if (sub.startsWith(file=)  sub.length() != file=.length()) {
-fileFound = true;
-  }
-}
-  }
-}
-
-return hprofFound  fileFound;
-  }
-
   private void printTaskEvents(TaskCompletionEvent[] events,
   Job.TaskStatusFilter filter, boolean profiling, IntegerRanges mapRanges,
   IntegerRanges reduceRanges) throws IOException, InterruptedException {



[25/50] [abbrv] hadoop git commit: HDFS-7668. Convert site documentation from apt to markdown (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a45ef2b6/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
deleted file mode 100644
index 8555b23..000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
+++ /dev/null
@@ -1,364 +0,0 @@
-
-~~ Licensed under the Apache License, Version 2.0 (the License);
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an AS IS BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  Hadoop Distributed File System-${project.version} - HDFS NFS Gateway
-  ---
-  ---
-  ${maven.build.timestamp}
-
-HDFS NFS Gateway
-
-%{toc|section=1|fromDepth=0}
-
-* {Overview}
-
-  The NFS Gateway supports NFSv3 and allows HDFS to be mounted as part of the 
client's local file system.
-  Currently NFS Gateway supports and enables the following usage patterns:
-
-   * Users can browse the HDFS file system through their local file system
- on NFSv3 client compatible operating systems.
-
-   * Users can download files from the HDFS file system onto their local file system.
-
-   * Users can upload files from their local file system directly to the
- HDFS file system.
-
-   * Users can stream data directly to HDFS through the mount point. File
- append is supported but random write is not supported.
-
-  The NFS gateway machine needs the same things required to run an HDFS client, such as the Hadoop JAR files and a HADOOP_CONF directory.
-  The NFS gateway can be on the same host as a DataNode, NameNode, or any HDFS client.
-
-
-* {Configuration}
-
-   The NFS-gateway uses proxy user to proxy all the users accessing the NFS 
mounts.
-   In non-secure mode, the user running the gateway is the proxy user, while 
in secure mode the
-   user in Kerberos keytab is the proxy user. Suppose the proxy user is 
'nfsserver'
-   and users belonging to the groups 'users-group1'
-   and 'users-group2' use the NFS mounts, then in core-site.xml of the NameNode, the following
-   two properties must be set, and only the NameNode needs a restart after the configuration change
-   (NOTE: replace the string 'nfsserver' with the proxy user name in your cluster):
-
-
-<property>
-  <name>hadoop.proxyuser.nfsserver.groups</name>
-  <value>root,users-group1,users-group2</value>
-  <description>
- The 'nfsserver' user is allowed to proxy all members of the 'users-group1' and
- 'users-group2' groups. Note that in most cases you will need to include the
- group "root" because the user "root" (which usually belongs to the root group) will
- generally be the user that initially executes the mount on the NFS client system.
- Set this to '*' to allow nfsserver user to proxy any group.
-  </description>
-</property>
-
-
-
-<property>
-  <name>hadoop.proxyuser.nfsserver.hosts</name>
-  <value>nfs-client-host1.com</value>
-  <description>
- This is the host where the nfs gateway is running. Set this to '*' to allow
- requests from any hosts to be proxied.
-  </description>
-</property>
-
-
-   The above are the only required configuration for the NFS gateway in 
non-secure mode. For Kerberized
-   hadoop clusters, the following configurations need to be added to 
hdfs-site.xml for the gateway (NOTE: replace
-   string nfsserver with the proxy user name and ensure the user contained 
in the keytab is
-   also the same proxy user):
-
-
-  <property>
-    <name>nfs.keytab.file</name>
-    <value>/etc/hadoop/conf/nfsserver.keytab</value> <!-- path to the nfs gateway keytab -->
-  </property>
-
-
-
-  <property>
-    <name>nfs.kerberos.principal</name>
-    <value>nfsserver/_HOST@YOUR-REALM.COM</value>
-  </property>
-
-
-   The rest of the NFS gateway configurations are optional for both secure and 
non-secure mode.
-
-   The AIX NFS client has a 
{{{https://issues.apache.org/jira/browse/HDFS-6549}few known issues}}
-   that prevent it from working correctly by default with the HDFS NFS
-   Gateway. If you want to be able to access the HDFS NFS Gateway from AIX, you
-   should set the following configuration setting to enable work-arounds for 
these
-   issues:
-
-
-<property>
-  <name>nfs.aix.compatibility.mode.enabled</name>
-  <value>true</value>
-</property>
-
-
-   Note that regular, non-AIX clients should NOT enable AIX compatibility mode.
-   The work-arounds implemented by AIX compatibility mode effectively disable
-   safeguards 

[21/50] [abbrv] hadoop git commit: HDFS-7668. Convert site documentation from apt to markdown (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a45ef2b6/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
new file mode 100644
index 000..626a473
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
@@ -0,0 +1,678 @@
+!---
+  Licensed under the Apache License, Version 2.0 (the License);
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an AS IS BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+--
+
+HDFS High Availability
+==
+
+* [HDFS High Availability](#HDFS_High_Availability)
+* [Purpose](#Purpose)
+* [Note: Using the Quorum Journal Manager or Conventional Shared 
Storage](#Note:_Using_the_Quorum_Journal_Manager_or_Conventional_Shared_Storage)
+* [Background](#Background)
+* [Architecture](#Architecture)
+* [Hardware resources](#Hardware_resources)
+* [Deployment](#Deployment)
+* [Configuration overview](#Configuration_overview)
+* [Configuration details](#Configuration_details)
+* [Deployment details](#Deployment_details)
+* [Administrative commands](#Administrative_commands)
+* [Automatic Failover](#Automatic_Failover)
+* [Introduction](#Introduction)
+* [Components](#Components)
+* [Deploying ZooKeeper](#Deploying_ZooKeeper)
+* [Before you begin](#Before_you_begin)
+* [Configuring automatic failover](#Configuring_automatic_failover)
+* [Initializing HA state in 
ZooKeeper](#Initializing_HA_state_in_ZooKeeper)
+* [Starting the cluster with 
start-dfs.sh](#Starting_the_cluster_with_start-dfs.sh)
+* [Starting the cluster manually](#Starting_the_cluster_manually)
+* [Securing access to ZooKeeper](#Securing_access_to_ZooKeeper)
+* [Verifying automatic failover](#Verifying_automatic_failover)
+* [Automatic Failover FAQ](#Automatic_Failover_FAQ)
+* [BookKeeper as a Shared storage 
(EXPERIMENTAL)](#BookKeeper_as_a_Shared_storage_EXPERIMENTAL)
+
+Purpose
+---
+
+This guide provides an overview of the HDFS High Availability (HA) feature and 
how to configure and manage an HA HDFS cluster, using NFS for the shared 
storage required by the NameNodes.
+
+This document assumes that the reader has a general understanding of the components and node types in an HDFS cluster. Please refer to the HDFS Architecture guide for details.
+
+Note: Using the Quorum Journal Manager or Conventional Shared Storage
+-
+
+This guide discusses how to configure and use HDFS HA using a shared NFS 
directory to share edit logs between the Active and Standby NameNodes. For 
information on how to configure HDFS HA using the Quorum Journal Manager 
instead of NFS, please see [this alternative 
guide.](./HDFSHighAvailabilityWithQJM.html)
+
+Background
+--
+
+Prior to Hadoop 2.0.0, the NameNode was a single point of failure (SPOF) in an 
HDFS cluster. Each cluster had a single NameNode, and if that machine or 
process became unavailable, the cluster as a whole would be unavailable until 
the NameNode was either restarted or brought up on a separate machine.
+
+This impacted the total availability of the HDFS cluster in two major ways:
+
+* In the case of an unplanned event such as a machine crash, the cluster would
+  be unavailable until an operator restarted the NameNode.
+
+* Planned maintenance events such as software or hardware upgrades on the
+  NameNode machine would result in windows of cluster downtime.
+
+The HDFS High Availability feature addresses the above problems by providing 
the option of running two redundant NameNodes in the same cluster in an 
Active/Passive configuration with a hot standby. This allows a fast failover to 
a new NameNode in the case that a machine crashes, or a graceful 
administrator-initiated failover for the purpose of planned maintenance.
+
+Architecture
+
+
+In a typical HA cluster, two separate machines are configured as NameNodes. At 
any point in time, exactly one of the NameNodes is in an *Active* state, and 
the other is in a *Standby* state. The Active NameNode is responsible for all 
client operations in the cluster, while the Standby is simply acting as a 
slave, maintaining enough state to provide 

[36/50] [abbrv] hadoop git commit: update CHANGES.txt for HDFS-7430, HDFS-7721, HDFS-7686

2015-02-23 Thread zhz
update CHANGES.txt for HDFS-7430, HDFS-7721, HDFS-7686


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63847077
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63847077
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63847077

Branch: refs/heads/HDFS-EC
Commit: 63847077f5c80729dc2b2ecc0f59907759156971
Parents: 904786f
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Fri Feb 13 15:15:31 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:50 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 17 +
 1 file changed, 9 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63847077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 747f54a..610d45c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -130,9 +130,6 @@ Trunk (Unreleased)
 HDFS-7591. hdfs classpath command should support same options as hadoop
 classpath (Varun Saxena via Arpit Agarwal)
 
-HDFS-7430. Rewrite the BlockScanner to use O(1) memory and use multiple
-threads (cmccabe)
-
 HDFS-7546. Document, and set an accepting default for
 dfs.namenode.kerberos.principal.pattern (Harsh J via aw)
 
@@ -289,9 +286,6 @@ Trunk (Unreleased)
 HDFS-7320. The appearance of hadoop-hdfs-httpfs site docs is inconsistent 
 (Masatake Iwasaki via aw)
 
-HDFS-7721. The HDFS BlockScanner may run fast during the first hour
-(cmccabe)
-
 HDFS-7670. HDFS Quota guide has typos, incomplete command lines
 (Brahma Reddy Battula via aw)
 
@@ -637,6 +631,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7790. Do not create optional fields in DFSInputStream unless they are
 needed (cmccabe)
 
+HDFS-7430. Refactor the BlockScanner to use O(1) memory and use multiple
+threads (cmccabe)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
@@ -945,6 +942,12 @@ Release 2.7.0 - UNRELEASED
 HDFS-7704. DN heartbeat to Active NN may be blocked and expire if
 connection to Standby NN continues to time out (Rushabh Shah via kihwal)
 
+HDFS-7721. The HDFS BlockScanner may run fast during the first hour
+(cmccabe)
+
+HDFS-7686. Re-add rapid rescan of possibly corrupt block feature to the
+block scanner (cmccabe)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
@@ -959,8 +962,6 @@ Release 2.7.0 - UNRELEASED
   HDFS-7776. Adding additional unit tests for Quota By Storage Type.
   (Xiaoyu Yao via Arpit Agarwal)
 
-  HDFS-7686. Re-add rapid rescan of possibly corrupt block feature to the
-  block scanner (cmccabe)
 
 Release 2.6.1 - UNRELEASED
 



[39/52] [abbrv] hadoop git commit: YARN-3238. Connection timeouts to nodemanagers are retried at multiple levels. Contributed by Jason Lowe

2015-02-23 Thread zhz
YARN-3238. Connection timeouts to nodemanagers are retried at multiple
levels. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92d67ace
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92d67ace
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92d67ace

Branch: refs/heads/HDFS-7285
Commit: 92d67ace3248930c0c0335070cc71a480c566a36
Parents: 8b465b4
Author: Xuan xg...@apache.org
Authored: Sat Feb 21 16:06:12 2015 -0800
Committer: Xuan xg...@apache.org
Committed: Sat Feb 21 16:06:12 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java  | 1 -
 2 files changed, 3 insertions(+), 1 deletion(-)
--
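
For context: ServerProxy builds its retry policy from a map of exception classes, and the patch below removes ConnectTimeoutException from that map so connect timeouts fall through to the default try-once policy and are retried only in the RPC layer. A sketch of the pattern (durations and the exact map contents here are illustrative):

    import java.net.ConnectException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public class RetryMapSketch {
      public static RetryPolicy create() {
        // Retry for up to 15 minutes, sleeping 10 seconds between attempts.
        RetryPolicy retryPolicy = RetryPolicies.retryUpToMaximumTimeWithFixedSleep(
            15 * 60 * 1000, 10 * 1000, TimeUnit.MILLISECONDS);
        Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
            new HashMap<Class<? extends Exception>, RetryPolicy>();
        exceptionToPolicyMap.put(ConnectException.class, retryPolicy);
        // ConnectTimeoutException is deliberately absent: it falls through to
        // the TRY_ONCE_THEN_FAIL default, leaving the retry to the RPC layer.
        return RetryPolicies.retryByException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
      }
    }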


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92d67ace/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 359e647..1650a20 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -637,6 +637,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3194. RM should handle NMContainerStatuses sent by NM while
 registering if NM is Reconnected node (Rohith via jlowe)
 
+YARN-3238. Connection timeouts to nodemanagers are retried at
+multiple levels (Jason Lowe via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92d67ace/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
index b6fea62..6024560 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
@@ -72,7 +72,6 @@ public class ServerProxy {
 exceptionToPolicyMap.put(ConnectException.class, retryPolicy);
 exceptionToPolicyMap.put(NoRouteToHostException.class, retryPolicy);
 exceptionToPolicyMap.put(UnknownHostException.class, retryPolicy);
-exceptionToPolicyMap.put(ConnectTimeoutException.class, retryPolicy);
 exceptionToPolicyMap.put(RetriableException.class, retryPolicy);
 exceptionToPolicyMap.put(SocketException.class, retryPolicy);
 



[03/52] [abbrv] hadoop git commit: HADOOP-11593. Convert site documentation from apt to markdown (stragglers) (Masatake Iwasaki via aw)
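
The converted KMS page below documents the kms:// KeyProvider URI scheme. As a quick orientation, a client can resolve such a URI programmatically; a minimal sketch, assuming a KMS at localhost:16000 as in the docs:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class KmsClientSketch {
      public static void main(String[] args) throws Exception {
        // kms://http@localhost:16000/kms maps to http://localhost:16000/kms;
        // KeyProviderFactory returns null if no provider handles the scheme.
        KeyProvider provider = KeyProviderFactory.get(
            new URI("kms://http@localhost:16000/kms"), new Configuration());
        System.out.println("key names: " + provider.getKeys());
      }
    }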

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6fc1f3e/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
--
diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm 
b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
new file mode 100644
index 000..44b5bfb
--- /dev/null
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -0,0 +1,864 @@
+!---
+  Licensed under the Apache License, Version 2.0 (the License);
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an AS IS BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+--
+
+#set ( $H3 = '###' )
+#set ( $H4 = '' )
+#set ( $H5 = '#' )
+
+Hadoop Key Management Server (KMS) - Documentation Sets
+===
+
+Hadoop KMS is a cryptographic key management server based on Hadoop's 
**KeyProvider** API.
+
+It provides client and server components which communicate over HTTP using a REST API.
+
+The client is a KeyProvider implementation that interacts with the KMS using the KMS HTTP REST API.
+
+KMS and its client have built-in security and they support HTTP SPNEGO 
Kerberos authentication and HTTPS secure transport.
+
+KMS is a Java web-application and it runs using a pre-configured Tomcat 
bundled with the Hadoop distribution.
+
+KMS Client Configuration
+
+
+The KMS client `KeyProvider` uses the **kms** scheme, and the embedded URL 
must be the URL of the KMS. For example, for a KMS running on 
`http://localhost:16000/kms`, the KeyProvider URI is 
`kms://http@localhost:16000/kms`. And, for a KMS running on 
`https://localhost:16000/kms`, the KeyProvider URI is 
`kms://https@localhost:16000/kms`
+
+KMS
+---
+
+$H3 KMS Configuration
+
+Configure the KMS backing KeyProvider properties in the 
`etc/hadoop/kms-site.xml` configuration file:
+
+```xml
+  <property>
+    <name>hadoop.kms.key.provider.uri</name>
+    <value>jceks://file@/${user.home}/kms.keystore</value>
+  </property>
+
+  <property>
+    <name>hadoop.security.keystore.java-keystore-provider.password-file</name>
+    <value>kms.keystore.password</value>
+  </property>
+```
+
+The password file is looked up in Hadoop's configuration directory via the classpath.
+
+NOTE: You need to restart the KMS for the configuration changes to take effect.
+
+$H3 KMS Cache
+
+KMS caches keys for a short period of time to avoid excessive hits to the underlying key provider.
+
+The cache is enabled by default (it can be disabled by setting the `hadoop.kms.cache.enable` boolean property to false).
+
+The cache is used with the following three methods only: `getCurrentKey()`, `getKeyVersion()`, and `getMetadata()`.
+
+For the `getCurrentKey()` method, cached entries are kept for a maximum of 30000 milliseconds regardless of the number of times the key is accessed (to avoid stale keys being considered current).
+
+For the `getKeyVersion()` method, cached entries are kept with a default inactivity timeout of 600000 milliseconds (10 mins). This timeout is configurable via the following property in the `etc/hadoop/kms-site.xml` configuration file:
+
+```xml
+   <property>
+     <name>hadoop.kms.cache.enable</name>
+     <value>true</value>
+   </property>
+
+   <property>
+     <name>hadoop.kms.cache.timeout.ms</name>
+     <value>600000</value>
+   </property>
+
+   <property>
+     <name>hadoop.kms.current.key.cache.timeout.ms</name>
+     <value>30000</value>
+   </property>
+```
+
+$H3 KMS Aggregated Audit logs
+
+Audit logs are aggregated for API accesses to the GET\_KEY\_VERSION, 
GET\_CURRENT\_KEY, DECRYPT\_EEK, GENERATE\_EEK operations.
+
+Entries are grouped by the (user,key,operation) combined key for a 
configurable aggregation interval after which the number of accesses to the 
specified end-point by the user for a given key is flushed to the audit log.
+
+The aggregation interval is configured via the following property:
+
+  <property>
+    <name>hadoop.kms.aggregation.delay.ms</name>
+    <value>10000</value>
+  </property>
+
+$H3 Start/Stop the KMS
+
+To start/stop KMS use KMS's bin/kms.sh script. For example:
+
+hadoop-${project.version} $ sbin/kms.sh start
+
+NOTE: Invoking the script without any parameters lists all possible parameters (start, stop, run, etc.). The `kms.sh` script is a wrapper for Tomcat's `catalina.sh` script that sets the environment variables and Java System properties required to run KMS.
+
+$H3 Embedded Tomcat Configuration
+
+To configure the embedded Tomcat go to the `share/hadoop/kms/tomcat/conf`.
+
+KMS pre-configures the 

[08/52] [abbrv] hadoop git commit: MAPREDUCE-6261. NullPointerException if MapOutputBuffer.flush invoked twice. Contributed by Tsuyoshi OZAWA

2015-02-23 Thread zhz
MAPREDUCE-6261. NullPointerException if MapOutputBuffer.flush invoked twice. 
Contributed by Tsuyoshi OZAWA


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4981d082
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4981d082
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4981d082

Branch: refs/heads/HDFS-7285
Commit: 4981d082d4f3c82d1c2c900c7488b83bf20301cc
Parents: 1714609
Author: Jason Lowe jl...@apache.org
Authored: Wed Feb 18 19:28:02 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Wed Feb 18 19:28:02 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/MapTask.java  | 4 
 2 files changed, 7 insertions(+)
--
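
The NPE arises because MapOutputBuffer releases kvbuffer when output is closed, so a second flush() dereferences already-released state; the patch below turns the repeat call into a logged no-op. The general guard pattern, sketched with illustrative names (this is not the MapTask code):

    import java.io.IOException;

    class GuardedBuffer {
      private byte[] buffer = new byte[64 * 1024];

      // A second flush() after the buffer has been released becomes a
      // harmless no-op instead of a NullPointerException.
      public synchronized void flush() throws IOException {
        if (buffer == null) {      // already flushed and released
          return;
        }
        write(buffer);
        buffer = null;             // release so repeated calls skip the work
      }

      private void write(byte[] data) throws IOException {
        // sink elided for the sketch
      }
    }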


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4981d082/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index e944d82..7f4c3e7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -379,6 +379,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-4286. TestClientProtocolProviderImpls passes on failure 
 conditions. (Devaraj K via ozawa)
 
+MAPREDUCE-6261. NullPointerException if MapOutputBuffer.flush invoked
+twice (Tsuyoshi OZAWA via jlowe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4981d082/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
index 1a4901b..8094317 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
@@ -1458,6 +1458,10 @@ public class MapTask extends Task {
 public void flush() throws IOException, ClassNotFoundException,
InterruptedException {
   LOG.info(Starting flush of map output);
+  if (kvbuffer == null) {
+LOG.info(kvbuffer is null. Skipping flush.);
+return;
+  }
   spillLock.lock();
   try {
 while (spillInProgress) {



[35/50] [abbrv] hadoop git commit: YARN-2847. Linux native container executor segfaults if default banned user detected. Contributed by Olaf Flebbe

2015-02-23 Thread zhz
YARN-2847. Linux native container executor segfaults if default banned user 
detected. Contributed by Olaf Flebbe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61164671
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61164671
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61164671

Branch: refs/heads/HDFS-EC
Commit: 61164671faaea80c65c071dc2d82c9ac21d1b890
Parents: e9c5d7e
Author: Jason Lowe jl...@apache.org
Authored: Fri Feb 13 20:20:07 2015 +
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:49 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../impl/container-executor.c   |  3 ++-
 .../test/test-container-executor.c  | 24 
 3 files changed, 24 insertions(+), 6 deletions(-)
--
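
(The fix is subtle: the default banned-user array is now assigned to banned_users itself rather than only to a temporary iteration cursor, presumably so that code later in check_user that inspects or frees banned_users sees a consistent value; the test change parameterizes write_config_file so that configurations both with and without a banned.users entry are exercised.)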


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61164671/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cd9b467..1644268 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -593,6 +593,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3164. RMAdmin command usage prints incorrect command name. 
 (Bibin A Chundatt via xgong)
 
+YARN-2847. Linux native container executor segfaults if default banned
+user detected (Olaf Flebbe via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61164671/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 04d0232..edfd25f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -684,8 +684,9 @@ struct passwd* check_user(const char *user) {
 return NULL;
   }
   char **banned_users = get_values(BANNED_USERS_KEY);
-  char **banned_user = (banned_users == NULL) ? 
+  banned_users = banned_users == NULL ?
 (char**) DEFAULT_BANNED_USERS : banned_users;
+  char **banned_user = banned_users;
   for(; *banned_user; ++banned_user) {
 if (strcmp(*banned_user, user) == 0) {
   free(user_info);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61164671/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 7f08e06..be6cc49 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -89,15 +89,19 @@ void run(const char *cmd) {
   }
 }
 
-int write_config_file(char *file_name) {
+int write_config_file(char *file_name, int banned) {
   FILE *file;
   file = fopen(file_name, w);
   if (file == NULL) {
 printf(Failed to open %s.\n, file_name);
 return EXIT_FAILURE;
   }
-  fprintf(file, banned.users=bannedUser\n);
-  fprintf(file, min.user.id=500\n);
+  if (banned != 0) {
+fprintf(file, banned.users=bannedUser\n);
+fprintf(file, min.user.id=500\n);
+  } else {
+fprintf(file, min.user.id=0\n);
+  }
   fprintf(file, allowed.system.users=allowedUser,daemon\n);
   fclose(file);
   return 0;
@@ -385,7 +389,7 @@ void test_delete_user() {
 
   char buffer[10];
   sprintf(buffer, %s/test.cfg, app_dir);
-  if (write_config_file(buffer) != 0) {
+  if (write_config_file(buffer, 1) != 0) {
 exit(1);
   }
 
@@ -745,7 +749,7 @@ int main(int argc, char **argv) {
 exit(1);
   }
   
-  if (write_config_file(TEST_ROOT /test.cfg) != 0) {
+  if (write_config_file(TEST_ROOT /test.cfg, 

[14/50] [abbrv] hadoop git commit: HDFS-7790. Do not create optional fields in DFSInputStream unless they are needed (cmccabe)

2015-02-23 Thread zhz
HDFS-7790. Do not create optional fields in DFSInputStream unless they are 
needed (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f390c004
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f390c004
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f390c004

Branch: refs/heads/HDFS-EC
Commit: f390c00445fb68002c6b107731037c587b64e397
Parents: 693e43c
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Feb 12 11:12:26 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:48 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 22 ++--
 2 files changed, 19 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f390c004/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 09ae2e7..9117fc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -631,6 +631,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7684. The host:port settings of the daemons should be trimmed before
 use. (Anu Engineer via aajisaka)
 
+HDFS-7790. Do not create optional fields in DFSInputStream unless they are
+needed (cmccabe)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f390c004/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 25c23e1..09d6513 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -127,8 +127,15 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
    * The value type can be either ByteBufferPool or ClientMmap, depending on
    * whether this is a memory-mapped buffer or not.
    */
-  private final IdentityHashStore<ByteBuffer, Object>
-      extendedReadBuffers = new IdentityHashStore<ByteBuffer, Object>(0);
+  private IdentityHashStore<ByteBuffer, Object> extendedReadBuffers;
+
+  private synchronized IdentityHashStore<ByteBuffer, Object>
+      getExtendedReadBuffers() {
+    if (extendedReadBuffers == null) {
+      extendedReadBuffers = new IdentityHashStore<ByteBuffer, Object>(0);
+    }
+    return extendedReadBuffers;
+  }
 
   public static class ReadStatistics {
 public ReadStatistics() {
@@ -236,7 +243,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   private final ConcurrentHashMap<DatanodeInfo, DatanodeInfo> deadNodes =
       new ConcurrentHashMap<DatanodeInfo, DatanodeInfo>();
 
-  private final byte[] oneByteBuf = new byte[1]; // used for 'int read()'
+  private byte[] oneByteBuf; // used for 'int read()'
 
   void addToDeadNodes(DatanodeInfo dnInfo) {
 deadNodes.put(dnInfo, dnInfo);
@@ -670,7 +677,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
 }
 dfsClient.checkOpen();
 
-    if (!extendedReadBuffers.isEmpty()) {
+    if ((extendedReadBuffers != null) && (!extendedReadBuffers.isEmpty())) {
       final StringBuilder builder = new StringBuilder();
       extendedReadBuffers.visitAll(new IdentityHashStore.Visitor<ByteBuffer, Object>() {
         private String prefix = "";
@@ -690,6 +697,9 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
 
   @Override
   public synchronized int read() throws IOException {
+if (oneByteBuf == null) {
+  oneByteBuf = new byte[1];
+}
     int ret = read( oneByteBuf, 0, 1 );
     return ( ret <= 0 ) ? -1 : (oneByteBuf[0] & 0xff);
   }
@@ -1708,7 +1718,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
 }
 buffer = ByteBufferUtil.fallbackRead(this, bufferPool, maxLength);
 if (buffer != null) {
-  extendedReadBuffers.put(buffer, bufferPool);
+  getExtendedReadBuffers().put(buffer, bufferPool);
 }
 return buffer;
   }
@@ -1787,7 +1797,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   buffer = clientMmap.getMappedByteBuffer().asReadOnlyBuffer();
   buffer.position((int)blockPos);
   buffer.limit((int)(blockPos + length));
-  extendedReadBuffers.put(buffer, clientMmap);
+  getExtendedReadBuffers().put(buffer, clientMmap);
   synchronized (infoLock) {
 
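Both hunks apply one pattern: fields that most streams never touch (the zero-copy buffer registry, the one-byte scratch buffer) start as null and are allocated on first use under the monitor the class already synchronizes on. A self-contained sketch of the pattern outside Hadoop (LazyStream and its fields are illustrative):

import java.util.IdentityHashMap;
import java.util.Map;

public class LazyStream {
  private Map<Object, Object> extendedBuffers; // null until first zero-copy read
  private byte[] oneByteBuf;                   // null until first read()

  private synchronized Map<Object, Object> getExtendedBuffers() {
    if (extendedBuffers == null) {
      extendedBuffers = new IdentityHashMap<>(); // pay the cost only when needed
    }
    return extendedBuffers;
  }

  public synchronized int read() {
    if (oneByteBuf == null) {
      oneByteBuf = new byte[1];
    }
    oneByteBuf[0] = 42; // stand-in for reading from the underlying stream
    return oneByteBuf[0] & 0xff;
  }
}

Note the matching null guard on the close path above: any code that reads the raw field instead of going through the accessor must tolerate null.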

[18/50] [abbrv] hadoop git commit: HDFS-7668. Convert site documentation from apt to markdown (Masatake Iwasaki via aw)

2015-02-23 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a45ef2b6/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
new file mode 100644
index 000..7afb0f5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -0,0 +1,1939 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+WebHDFS REST API
+
+
+* [WebHDFS REST API](#WebHDFS_REST_API)
+    * [Document Conventions](#Document_Conventions)
+    * [Introduction](#Introduction)
+        * [Operations](#Operations)
+        * [FileSystem URIs vs HTTP URLs](#FileSystem_URIs_vs_HTTP_URLs)
+        * [HDFS Configuration Options](#HDFS_Configuration_Options)
+    * [Authentication](#Authentication)
+    * [Proxy Users](#Proxy_Users)
+    * [File and Directory Operations](#File_and_Directory_Operations)
+        * [Create and Write to a File](#Create_and_Write_to_a_File)
+        * [Append to a File](#Append_to_a_File)
+        * [Concat File(s)](#Concat_Files)
+        * [Open and Read a File](#Open_and_Read_a_File)
+        * [Make a Directory](#Make_a_Directory)
+        * [Create a Symbolic Link](#Create_a_Symbolic_Link)
+        * [Rename a File/Directory](#Rename_a_FileDirectory)
+        * [Delete a File/Directory](#Delete_a_FileDirectory)
+        * [Truncate a File](#Truncate_a_File)
+        * [Status of a File/Directory](#Status_of_a_FileDirectory)
+        * [List a Directory](#List_a_Directory)
+    * [Other File System Operations](#Other_File_System_Operations)
+        * [Get Content Summary of a Directory](#Get_Content_Summary_of_a_Directory)
+        * [Get File Checksum](#Get_File_Checksum)
+        * [Get Home Directory](#Get_Home_Directory)
+        * [Set Permission](#Set_Permission)
+        * [Set Owner](#Set_Owner)
+        * [Set Replication Factor](#Set_Replication_Factor)
+        * [Set Access or Modification Time](#Set_Access_or_Modification_Time)
+        * [Modify ACL Entries](#Modify_ACL_Entries)
+        * [Remove ACL Entries](#Remove_ACL_Entries)
+        * [Remove Default ACL](#Remove_Default_ACL)
+        * [Remove ACL](#Remove_ACL)
+        * [Set ACL](#Set_ACL)
+        * [Get ACL Status](#Get_ACL_Status)
+        * [Check access](#Check_access)
+    * [Extended Attributes(XAttrs) Operations](#Extended_AttributesXAttrs_Operations)
+        * [Set XAttr](#Set_XAttr)
+        * [Remove XAttr](#Remove_XAttr)
+        * [Get an XAttr](#Get_an_XAttr)
+        * [Get multiple XAttrs](#Get_multiple_XAttrs)
+        * [Get all XAttrs](#Get_all_XAttrs)
+        * [List all XAttrs](#List_all_XAttrs)
+    * [Snapshot Operations](#Snapshot_Operations)
+        * [Create Snapshot](#Create_Snapshot)
+        * [Delete Snapshot](#Delete_Snapshot)
+        * [Rename Snapshot](#Rename_Snapshot)
+    * [Delegation Token Operations](#Delegation_Token_Operations)
+        * [Get Delegation Token](#Get_Delegation_Token)
+        * [Get Delegation Tokens](#Get_Delegation_Tokens)
+        * [Renew Delegation Token](#Renew_Delegation_Token)
+        * [Cancel Delegation Token](#Cancel_Delegation_Token)
+    * [Error Responses](#Error_Responses)
+        * [HTTP Response Codes](#HTTP_Response_Codes)
+            * [Illegal Argument Exception](#Illegal_Argument_Exception)
+            * [Security Exception](#Security_Exception)
+            * [Access Control Exception](#Access_Control_Exception)
+            * [File Not Found Exception](#File_Not_Found_Exception)
+    * [JSON Schemas](#JSON_Schemas)
+        * [ACL Status JSON Schema](#ACL_Status_JSON_Schema)
+        * [XAttrs JSON Schema](#XAttrs_JSON_Schema)
+        * [XAttrNames JSON Schema](#XAttrNames_JSON_Schema)
+        * [Boolean JSON Schema](#Boolean_JSON_Schema)
+        * [ContentSummary JSON Schema](#ContentSummary_JSON_Schema)
+        * [FileChecksum JSON Schema](#FileChecksum_JSON_Schema)
+        * [FileStatus JSON Schema](#FileStatus_JSON_Schema)
+            * [FileStatus Properties](#FileStatus_Properties)
+        * [FileStatuses JSON Schema](#FileStatuses_JSON_Schema)
+        * [Long JSON Schema](#Long_JSON_Schema)
+        * [Path JSON Schema](#Path_JSON_Schema)
+        * [RemoteException JSON Schema](#RemoteException_JSON_Schema)
+        * [Token JSON Schema](#Token_JSON_Schema)
+ 

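Every operation listed above maps to a plain HTTP request against the NameNode. As a quick illustration (not part of this patch), reading a file through WebHDFS with stock JDK classes; host, port, path, and user.name are placeholders for an actual cluster:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class WebHdfsOpenExample {
  public static void main(String[] args) throws Exception {
    // OPEN is redirected by the NameNode to a DataNode that serves the data.
    URL url = new URL("http://namenode.example.com:50070/webhdfs/v1"
        + "/user/alice/data.txt?op=OPEN&user.name=alice");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}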
[33/50] [abbrv] hadoop git commit: HDFS-7776. Adding additional unit tests for Quota By Storage Type. (Contributed by Xiaoyu Yao)

2015-02-23 Thread zhz
HDFS-7776. Adding additional unit tests for Quota By Storage Type. (Contributed 
by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9c5d7ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9c5d7ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9c5d7ee

Branch: refs/heads/HDFS-EC
Commit: e9c5d7ee71cb617d6f4edc01e483cbb319c6511f
Parents: fa93278
Author: Arpit Agarwal a...@apache.org
Authored: Fri Feb 13 11:56:25 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 16 10:29:49 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  21 ++-
 .../server/namenode/TestQuotaByStorageType.java | 187 ++-
 2 files changed, 199 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c5d7ee/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6463010..7705f87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -292,9 +292,6 @@ Trunk (Unreleased)
 HDFS-7721. The HDFS BlockScanner may run fast during the first hour
 (cmccabe)
 
-HDFS-7751. Fix TestHDFSCLI for quota with storage type.  (Xiaoyu Yao
-via szetszwo)
-
 HDFS-7670. HDFS Quota guide has typos, incomplete command lines
 (Brahma Reddy Battula via aw)
 
@@ -340,8 +337,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-6133. Add a feature for replica pinning so that a pinned replica
 will not be moved by Balancer/Mover.  (zhaoyunjiong via szetszwo)
 
-HDFS-7584. Enable Quota Support for Storage Types. (Xiaoyu Yao via
-Arpit Agarwal)
+HDFS-7584. Enable Quota Support for Storage Types (See breakdown of
+tasks below)
 
   IMPROVEMENTS
 
@@ -948,6 +945,20 @@ Release 2.7.0 - UNRELEASED
 HDFS-7704. DN heartbeat to Active NN may be blocked and expire if
 connection to Standby NN continues to time out (Rushabh Shah via kihwal)
 
+BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
+
+  HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
+  Protocol changes. (Xiaoyu Yao via Arpit Agarwal)
+
+  HDFS-7723. Quota By Storage Type namenode implementation. (Xiaoyu Yao
+  via Arpit Agarwal)
+
+  HDFS-7751. Fix TestHDFSCLI for quota with storage type.  (Xiaoyu Yao
+  via szetszwo)
+
+  HDFS-7776. Adding additional unit tests for Quota By Storage Type.
+  (Xiaoyu Yao via Arpit Agarwal)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

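For context on what the HDFS-7584 breakdown delivers: a client can now cap how much data a directory may consume per storage type. A hedged sketch of the API surface these subtasks introduce (paths, the quota size, and the policy name are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.StorageType;

public class SsdQuotaExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path dir = new Path("/TestQuotaByStorageType/foo");
    dfs.mkdirs(dir);
    dfs.setStoragePolicy(dir, "ONE_SSD"); // place one replica on SSD
    // Allow at most 4 MB of SSD space under this directory.
    dfs.setQuotaByStorageType(dir, StorageType.SSD, 4L * 1024 * 1024);
  }
}

The matching admin form added by this work is expected to be hdfs dfsadmin -setSpaceQuota with a -storageType option.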
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c5d7ee/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
index b0d5d87..57f026d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.StorageType.DEFAULT;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -30,15 +29,18 @@ package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.hadoop.hdfs.*;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.io.IOException;
+
 public class TestQuotaByStorageType {
 
   private static final int BLOCKSIZE = 1024;
   private static final short REPLICATION = 3;
-  static final long seed = 0L;
+  private static final long seed = 0L;
   private static final Path dir = new Path(/TestQuotaByStorageType);
 
   private Configuration conf;
@@ -219,7 +221,6 @@ public class TestQuotaByStorageType {
 // Verify space consumed and remaining quota
 long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
 .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
-;
 assertEquals(file1Len, ssdConsumed);
 
 // move file from foo to bar
@@ -356,7 +357,6 @@ public class 

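The truncated hunks above follow one assertion pattern: resolve the directory's INode on the NameNode and compare per-type consumption against the bytes written. Condensed into a helper below; the fsdir handle and getINode4Write are assumed from the surrounding test, which this digest cuts off, so treat this as a sketch rather than the test itself:

package org.apache.hadoop.hdfs.server.namenode;

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.StorageType;

public class QuotaAssertions {
  // Mirrors the verification step in the hunks above; fsdir is the namesystem's
  // FSDirectory, obtained in the full test from the running MiniDFSCluster.
  static void assertSsdConsumed(FSDirectory fsdir, Path dir, long expectedLen)
      throws Exception {
    INode fnode = fsdir.getINode4Write(dir.toString());
    long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(expectedLen, ssdConsumed);
  }
}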