Author: subru
Date: Fri Jul 25 20:33:09 2014
New Revision: 1613514

URL: http://svn.apache.org/r1613514
Log:
syncing YARN-1051 branch with trunk

Added:
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestClientAccessPrivilege.java
      - copied unchanged from r1613507, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestClientAccessPrivilege.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java
      - copied unchanged from r1613507, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/
      - copied from r1613507, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
      - copied unchanged from r1613507, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsMultihoming.apt.vm
      - copied unchanged from r1613507, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsMultihoming.apt.vm
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java
      - copied unchanged from r1613507, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java
      - copied unchanged from r1613507, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/
      - copied from r1613507, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithSaslDataTransfer.java
      - copied unchanged from r1613507, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithSaslDataTransfer.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz
      - copied unchanged from r1613507, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz
      - copied unchanged from r1613507, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz
Removed:
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
Modified:
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
    hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml

Propchange: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1609878-1613507

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java Fri Jul 25 20:33:09 2014
@@ -104,6 +104,10 @@ public class RpcProgramMountd extends Rp
 
   @Override
   public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
+    if (hostsMatcher == null) {
+      return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
+          null);
+    }
     AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
     if (accessPrivilege == AccessPrivilege.NONE) {
       return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
@@ -208,16 +212,23 @@ public class RpcProgramMountd extends Rp
     } else if (mntproc == MNTPROC.UMNTALL) {
       umntall(out, xid, client);
     } else if (mntproc == MNTPROC.EXPORT) {
-      // Currently only support one NFS export 
+      // Currently only support one NFS export
       List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
-      hostsMatchers.add(hostsMatcher);
-      out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      if (hostsMatcher != null) {
+        hostsMatchers.add(hostsMatcher);
+        out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      } else {
+        // This means there are no valid exports provided.
+        RpcAcceptedReply.getInstance(xid,
+          RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
+          out);
+      }
     } else {
       // Invalid procedure
       RpcAcceptedReply.getInstance(xid,
           RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
           out);
-    }  
+    }
     ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
     RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
     RpcUtil.sendRpcResponse(ctx, rsp);
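
The null check added to mnt() matters because the exports matcher can be null when dfs.nfs.exports.allowed.hosts contains no valid entry, the scenario HDFS-6456 addresses. A self-contained sketch of the same guard pattern (HostsMatcher is a stand-in for the real NfsExports, and the strings stand in for the NFS3 status codes):

    import java.net.InetAddress;

    public class MountGuardSketch {
      interface HostsMatcher {  // stand-in for org.apache.hadoop.nfs.NfsExports
        String getAccessPrivilege(InetAddress client);
      }

      static String mnt(HostsMatcher matcher, InetAddress client) {
        if (matcher == null) {
          // No valid exports configured: refuse the mount outright instead
          // of hitting a NullPointerException on getAccessPrivilege().
          return "NFS3ERR_ACCES";
        }
        return "NONE".equals(matcher.getAccessPrivilege(client))
            ? "NFS3ERR_ACCES" : "NFS3_OK";
      }

      public static void main(String[] args) {
        System.out.println(mnt(null, InetAddress.getLoopbackAddress())); // NFS3ERR_ACCES
      }
    }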

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java Fri Jul 25 20:33:09 2014
@@ -154,6 +154,8 @@ public class Nfs3Utils {
     if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) {
       if (type == NfsFileType.NFSREG.toValue()) {
         rtn |= Nfs3Constant.ACCESS3_EXECUTE;
+      } else {
+        rtn |= Nfs3Constant.ACCESS3_LOOKUP;
       }
     }
     return rtn;
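
For context: in the NFSv3 ACCESS call (RFC 1813), the execute permission bit maps to ACCESS3_EXECUTE (0x0020) only for regular files; for directories it should grant ACCESS3_LOOKUP (0x0002), which is what the added else branch restores (HDFS-6689). A minimal sketch of the corrected mapping, with constants per RFC 1813 and a hypothetical class name:

    public class AccessBitsSketch {
      static final int ACCESS3_LOOKUP  = 0x0002; // directories: look up a name
      static final int ACCESS3_EXECUTE = 0x0020; // regular files: execute

      // Mirrors the fixed branch above: the POSIX execute bit grants EXECUTE
      // on regular files and LOOKUP on everything else (i.e. directories).
      static int accessForExecuteBit(boolean isRegularFile) {
        return isRegularFile ? ACCESS3_EXECUTE : ACCESS3_LOOKUP;
      }

      public static void main(String[] args) {
        System.out.println(accessForExecuteBit(false)); // 2, the value the new test expects
      }
    }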

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Fri Jul 25 20:33:09 2014
@@ -1051,8 +1051,12 @@ public class RpcProgramNfs3 extends RpcP
   
   @Override
   public REMOVE3Response remove(XDR xdr, RpcInfo info) {
+    return remove(xdr, getSecurityHandler(info), info.remoteAddress());
+  }
+ 
+  @VisibleForTesting
+  REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
     REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK);
-    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1083,17 +1087,19 @@ public class RpcProgramNfs3 extends RpcP
         return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
+      WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
+          preOpDirAttr);
+      if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
+        return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
+      }
+
       String fileIdPath = dirFileIdPath + "/" + fileName;
       HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
       if (fstat == null) {
-        WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
-            preOpDirAttr);
-        return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, dirWcc);
+        return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
       }
       if (fstat.isDir()) {
-        WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
-            preOpDirAttr);
-        return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, dirWcc);
+        return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, errWcc);
       }
 
       boolean result = dfsClient.delete(fileIdPath, false);
@@ -2117,8 +2123,11 @@ public class RpcProgramNfs3 extends RpcP
     if (!doPortMonitoring(remoteAddress)) {
       return false;
     }
-    
+
     // Check export table
+    if (exports == null) {
+        return false;
+    }
     InetAddress client = ((InetSocketAddress) remoteAddress).getAddress();
     AccessPrivilege access = exports.getAccessPrivilege(client);
     if (access == AccessPrivilege.NONE) {
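
Two fixes meet in this hunk: remove() now rejects deletes unless the mount has READ_WRITE privilege (HDFS-6703), and the logic moved into a package-private overload so TestClientAccessPrivilege can call it with a mocked SecurityHandler and an arbitrary remote address. A generic sketch of that test-seam pattern, with hypothetical names throughout:

    import java.net.InetSocketAddress;
    import java.net.SocketAddress;

    public class TestableHandlerSketch {
      // RPC entry point: only unpacks the request context.
      public String remove(String rpcInfo) {
        return remove(rpcInfo, new InetSocketAddress("client.example", 2049));
      }

      // Test seam: a unit test can pass any user and remote address directly.
      String remove(String user, SocketAddress remoteAddress) {
        if (!hasReadWriteAccess(remoteAddress)) {
          return "NFS3ERR_ACCES"; // a read-only mount must not delete files
        }
        return "NFS3_OK";
      }

      private boolean hasReadWriteAccess(SocketAddress addr) {
        return false; // stand-in for checkAccessPrivilege(addr, READ_WRITE)
      }

      public static void main(String[] args) {
        System.out.println(new TestableHandlerSketch().remove("alice")); // NFS3ERR_ACCES
      }
    }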

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java Fri Jul 25 20:33:09 2014
@@ -68,5 +68,12 @@ public class TestNfs3Utils {
       0, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 4}, attr));
     assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match",
       0, Nfs3Utils.getAccessRightsForUserGroup(3, 20, new int[] {5, 10}, attr));
+    
+    Mockito.when(attr.getUid()).thenReturn(2);
+    Mockito.when(attr.getGid()).thenReturn(10);
+    Mockito.when(attr.getMode()).thenReturn(457); // 711
+    Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue());
+    assertEquals("Access should be allowed for dir as mode is 711 and GID matches",
+        2 /* Lookup */, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 11}, attr));
   }
 }
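
A small decoding note for the new assertions: the mocked mode 457 is simply 0711 in decimal, i.e. rwx--x--x, so a group-matched caller on a directory gets exactly the LOOKUP bit (value 2). Quick check:

    public class ModeBits {
      public static void main(String[] args) {
        System.out.println(0711 == 457);                // true: octal 711 is decimal 457
        System.out.println(Integer.toOctalString(457)); // prints 711
      }
    }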

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java Fri Jul 25 20:33:09 2014
@@ -72,11 +72,11 @@ public class TestReaddir {
   public static void setup() throws Exception {
     String currentUser = System.getProperty("user.name");
     config.set(
-            DefaultImpersonationProvider.getProxySuperuserGroupConfKey(currentUser),
-            "*");
+            DefaultImpersonationProvider.getTestProvider().
+                getProxySuperuserGroupConfKey(currentUser), "*");
     config.set(
-            DefaultImpersonationProvider.getProxySuperuserIpConfKey(currentUser),
-            "*");
+            DefaultImpersonationProvider.getTestProvider().
+                getProxySuperuserIpConfKey(currentUser), "*");
     ProxyUsers.refreshSuperUserGroupsConfiguration(config);
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
     cluster.waitActive();
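
This test (and TestWrites below) switches from the removed static helpers to instance methods on DefaultImpersonationProvider.getTestProvider(). Assuming the default hadoop.proxyuser prefix, the two keys being set expand roughly as in this sketch (illustrative, not the provider's own code):

    import org.apache.hadoop.conf.Configuration;

    public class ProxyUserConfSketch {
      public static void main(String[] args) {
        String user = System.getProperty("user.name");
        Configuration conf = new Configuration();
        conf.set("hadoop.proxyuser." + user + ".groups", "*"); // groups the superuser may impersonate
        conf.set("hadoop.proxyuser." + user + ".hosts", "*");  // hosts it may impersonate from
      }
    }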

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java Fri Jul 25 20:33:09 2014
@@ -312,10 +312,12 @@ public class TestWrites {
         System.getProperty("user.name"));
     String currentUser = System.getProperty("user.name");
     config.set(
-            DefaultImpersonationProvider.getProxySuperuserGroupConfKey(currentUser),
+            DefaultImpersonationProvider.getTestProvider().
+                getProxySuperuserGroupConfKey(currentUser),
             "*");
     config.set(
-            DefaultImpersonationProvider.getProxySuperuserIpConfKey(currentUser),
+            DefaultImpersonationProvider.getTestProvider().
+                getProxySuperuserIpConfKey(currentUser),
             "*");
     ProxyUsers.refreshSuperUserGroupsConfiguration(config);
 

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Jul 25 20:33:09 2014
@@ -12,6 +12,8 @@ Trunk (Unreleased)
     HDFS-5570. Deprecate hftp / hsftp and replace them with webhdfs / swebhdfs.
     (wheat9)
 
+    HDFS-2538. option to disable fsck dots (Mohammad Kamrul Islam via aw)
+
   NEW FEATURES
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
@@ -250,6 +252,9 @@ Trunk (Unreleased)
     HDFS-5794. Fix the inconsistency of layout version number of 
     ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
 
+    HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI.
+    (Vinayakumar B via wheat9)
+
 Release 2.6.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -284,8 +289,38 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-3851. DFSOutputStream class code cleanup. (Jing Zhao via suresh)
 
+    HDFS-2856. Fix block protocol so that Datanodes don't require root or jsvc.
+    (cnauroth)
+
+    HDFS-5624. Add HDFS tests for ACLs in combination with viewfs.
+    (Stephen Chu via cnauroth)
+
+    HDFS-6655. Add 'header banner' to 'explorer.html' also in Namenode UI
+    (vinayakumarb)
+
+    HDFS-4120. Add a new "-skipSharedEditsCheck" option for BootstrapStandby
+    (Liang Xie and Rakesh R via vinayakumarb)
+
+    HDFS-6597. Add a new option to NN upgrade to terminate the process after
+    upgrade on NN is completed. (Danilo Vunjak via cnauroth)
+
+    HDFS-6700. BlockPlacementPolicy shoud choose storage but not datanode for
+    deletion. (szetszwo)
+
+    HDFS-6616. Add exclude-datanodes feature to WebHDFS redirection so that it
+    will not redirect retries to the same datanode. (zhaoyunjiong via szetszwo)
+
+    HDFS-6702. Change DFSClient to pass the StorageType from the namenode to
+    datanodes and change datanode to write block replicas using the specified
+    storage type. (szetszwo)
+
+    HDFS-6701. Make seed optional in NetworkTopology#sortByDistance.
+    (Ashwin Shankar via wang)
+
   OPTIMIZATIONS
 
+    HDFS-6690. Deduplicate xattr names in memory. (wang)
+
   BUG FIXES
 
     HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
@@ -300,6 +335,47 @@ Release 2.6.0 - UNRELEASED
     HADOOP-8158. Interrupting hadoop fs -put from the command line
     causes a LeaseExpiredException. (daryn via harsh)
 
+    HDFS-6678. MiniDFSCluster may still be partially running after initialization
+    fails. (cnauroth)
+
+    HDFS-5809. BlockPoolSliceScanner and high speed hdfs appending make
+    datanode to drop into infinite loop (cmccabe)
+
+    HDFS-6456. NFS should throw error for invalid entry in 
+    dfs.nfs.exports.allowed.hosts (Abhiraj Butala via brandonli)
+
+    HDFS-6689. NFS doesn't return correct lookup access for directories (brandonli)
+
+    HDFS-6478. RemoteException can't be retried properly for non-HA scenario.
+    (Ming Ma via jing9)
+
+    HDFS-6693. TestDFSAdminWithHA fails on windows ( vinayakumarb )
+
+    HDFS-6667. In HDFS HA mode, Distcp/SLive with webhdfs on secure cluster fails
+    with Client cannot authenticate via:[TOKEN, KERBEROS] error. (jing9)
+
+    HDFS-6704. Fix the command to launch JournalNode in HDFS-HA document.
+    (Akira AJISAKA via jing9)
+
+    HDFS-6731. Run "hdfs zkfc-formatZK" on a server in a non-namenode will cause
+    a null pointer exception. (Masatake Iwasaki via brandonli)
+
+    HDFS-6114. Block Scan log rolling will never happen if blocks written
+    continuously leading to huge size of dncp_block_verification.log.curr
+    (vinayakumarb via cmccabe)
+
+    HDFS-6455. NFS: Exception should be added in NFS log for invalid separator in
+    nfs.exports.allowed.hosts. (Abhiraj Butala via brandonli)
+
+    HDFS-6715. Webhdfs won't fail over when it gets java.io.IOException: Namenode
+    is in startup mode. (jing9)
+
+    HDFS-5919. FileJournalManager doesn't purge empty and corrupt inprogress edits
+    files (vinayakumarb)
+
+    HDFS-6752. Avoid Address bind errors in TestDatanodeConfig#testMemlockLimit
+    (vinayakumarb)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -322,6 +398,15 @@ Release 2.5.0 - UNRELEASED
     HDFS-6406. Add capability for NFS gateway to reject connections from
     unprivileged ports. (atm)
 
+    HDFS-2006. Ability to support storing extended attributes per file.
+
+    HDFS-5978. Create a tool to take fsimage and expose read-only WebHDFS API.
+    (Akira Ajisaka via wheat9)
+
+    HDFS-6278. Create HTML5-based UI for SNN. (wheat9)
+
+    HDFS-6279. Create new index page for JN / DN. (wheat9)
+
   IMPROVEMENTS
 
     HDFS-6007. Update documentation about short-circuit local reads (iwasakims
@@ -339,9 +424,6 @@ Release 2.5.0 - UNRELEASED
 
     HDFS-6158. Clean up dead code for OfflineImageViewer. (wheat9)
 
-    HDFS-5978. Create a tool to take fsimage and expose read-only WebHDFS API.
-    (Akira Ajisaka via wheat9)
-
     HDFS-6164. Remove lsr in OfflineImageViewer. (wheat9)
 
     HDFS-6167. Relocate the non-public API classes in the hdfs.client package.
@@ -369,10 +451,6 @@ Release 2.5.0 - UNRELEASED
 
     HDFS-6265. Prepare HDFS codebase for JUnit 4.11. (cnauroth)
 
-    HDFS-6278. Create HTML5-based UI for SNN. (wheat9)
-
-    HDFS-6279. Create new index page for JN / DN. (wheat9)
-
     HDFS-5693. Few NN metrics data points were collected via JMX when NN
     is under heavy load. (Ming Ma via jing9)
 
@@ -545,6 +623,11 @@ Release 2.5.0 - UNRELEASED
     HDFS-6493. Change dfs.namenode.startup.delay.block.deletion to second
     instead of millisecond. (Juan Yu via wang)
 
+    HDFS-6680. BlockPlacementPolicyDefault does not choose favored nodes
+    correctly.  (szetszwo) 
+
+    HDFS-6712. Document HDFS Multihoming Settings. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
@@ -554,6 +637,8 @@ Release 2.5.0 - UNRELEASED
 
     HDFS-6583. Remove clientNode in FileUnderConstructionFeature. (wheat9)
 
+    HDFS-6599. 2.4 addBlock is 10 to 20 times slower compared to 0.23 (daryn)
+
   BUG FIXES 
 
     HDFS-6112. NFS Gateway docs are incorrect for allowed hosts configuration.
@@ -737,9 +822,6 @@ Release 2.5.0 - UNRELEASED
     HDFS-6464. Support multiple xattr.name parameters for WebHDFS getXAttrs.
     (Yi Liu via umamahesh)
 
-    HDFS-6375. Listing extended attributes with the search permission.
-    (Charles Lamb via wang)
-
     HDFS-6539. test_native_mini_dfs is skipped in hadoop-hdfs/pom.xml
     (decstery via cmccabe)
 
@@ -822,6 +904,30 @@ Release 2.5.0 - UNRELEASED
     HDFS-6647. Edit log corruption when pipeline recovery occurs for deleted 
     file present in snapshot (kihwal)
 
+    HDFS-6378. NFS registration should timeout instead of hanging when
+    portmap/rpcbind is not available (Abhiraj Butala via brandonli)
+
+    HDFS-6703. NFS: Files can be deleted from a read-only mount
+    (Srikanth Upputuri via brandonli)
+
+    HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code
+    when xattr doesn't exist. (Charles Lamb via umamahesh)
+
+    HDFS-6696. Name node cannot start if the path of a file under
+    construction contains ".snapshot". (wang)
+
+    HDFS-6312. WebHdfs HA failover is broken on secure clusters. 
+    (daryn via tucu)
+
+    HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes
+    from the tree and deleting them from the inode map (kihwal via cmccabe)
+
+    HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
+    via cmccabe)
+
+    HDFS-6723. New NN webUI no longer displays decommissioned state for dead node.
+    (Ming Ma via wheat9)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
@@ -891,15 +997,6 @@ Release 2.5.0 - UNRELEASED
     HDFS-6492. Support create-time xattrs and atomically setting multiple
     xattrs. (wang)
 
-    HDFS-6312. WebHdfs HA failover is broken on secure clusters. 
-    (daryn via tucu)
-
-    HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes
-    from the tree and deleting them from the inode map (kihwal via cmccabe)
-
-    HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
-    via cmccabe)
-
 Release 2.4.1 - 2014-06-23 
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml Fri Jul 25 20:33:09 2014
@@ -142,6 +142,11 @@ http://maven.apache.org/xsd/maven-4.0.0.
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>

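hadoop-minikdc enters the test scope because the new SASL-on-DataTransferProtocol tests (HDFS-2856) need a throwaway Kerberos KDC. A minimal MiniKdc lifecycle, sketched from its public API with placeholder paths and principals:

    import java.io.File;
    import java.util.Properties;
    import org.apache.hadoop.minikdc.MiniKdc;

    public class MiniKdcSketch {
      public static void main(String[] args) throws Exception {
        Properties conf = MiniKdc.createConf();                   // default KDC settings
        MiniKdc kdc = new MiniKdc(conf, new File("target/kdc"));  // scratch working dir
        kdc.start();
        try {
          File keytab = new File("target/kdc/hdfs.keytab");
          kdc.createPrincipal(keytab, "hdfs/localhost");          // principal for the test
          // ... run a secured test cluster against kdc.getRealm() ...
        } finally {
          kdc.stop();
        }
      }
    }
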
Propchange: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1609878-1613507

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java Fri Jul 25 20:33:09 2014
@@ -744,7 +744,8 @@ public class BlockReaderFactory implemen
       }
     }
     try {
-      Peer peer = remotePeerFactory.newConnectedPeer(inetSocketAddress);
+      Peer peer = remotePeerFactory.newConnectedPeer(inetSocketAddress, token,
+        datanode);
       if (LOG.isTraceEnabled()) {
         LOG.trace("nextTcpPeer: created newConnectedPeer " + peer);
       }

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java Fri Jul 25 20:33:09 2014
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.util.DataChecksum;
 
 import com.google.common.annotations.VisibleForTesting;

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java Fri Jul 25 20:33:09 2014
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Fri Jul 25 20:33:09 2014
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
@@ -135,6 +137,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -152,16 +155,19 @@ import org.apache.hadoop.hdfs.protocol.S
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -208,7 +214,8 @@ import com.google.common.net.InetAddress
  *
  ********************************************************/
 @InterfaceAudience.Private
-public class DFSClient implements java.io.Closeable, RemotePeerFactory {
+public class DFSClient implements java.io.Closeable, RemotePeerFactory,
+    DataEncryptionKeyFactory {
   public static final Log LOG = LogFactory.getLog(DFSClient.class);
   public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
   static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
@@ -232,7 +239,7 @@ public class DFSClient implements java.i
   private final Random r = new Random();
   private SocketAddress[] localInterfaceAddrs;
   private DataEncryptionKey encryptionKey;
-  final TrustedChannelResolver trustedChannelResolver;
+  final SaslDataTransferClient saslClient;
   private final CachingStrategy defaultReadCachingStrategy;
   private final CachingStrategy defaultWriteCachingStrategy;
   private final ClientContext clientContext;
@@ -634,7 +641,12 @@ public class DFSClient implements java.i
     if (numThreads > 0) {
       this.initThreadsNumForHedgedReads(numThreads);
     }
-    this.trustedChannelResolver = TrustedChannelResolver.getInstance(getConfiguration());
+    this.saslClient = new SaslDataTransferClient(
+      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
+      TrustedChannelResolver.getInstance(conf),
+      conf.getBoolean(
+        IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
+        IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT));
   }
   
   /**
@@ -1804,23 +1816,6 @@ public class DFSClient implements java.i
                                      UnresolvedPathException.class);
      }
    }
-
-  /**
-   * Get the checksum of the whole file of a range of the file. Note that the
-   * range always starts from the beginning of the file.
-   * @param src The file path
-   * @param length The length of the range
-   * @return The checksum 
-   * @see DistributedFileSystem#getFileChecksum(Path)
-   */
-  public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)
-      throws IOException {
-    checkOpen();
-    Preconditions.checkArgument(length >= 0);
-    return getFileChecksum(src, length, clientName, namenode,
-        socketFactory, dfsClientConf.socketTimeout, getDataEncryptionKey(),
-        dfsClientConf.connectToDnViaHostname);
-  }
   
   @InterfaceAudience.Private
   public void clearDataEncryptionKey() {
@@ -1840,11 +1835,9 @@ public class DFSClient implements java.i
     return d == null ? false : d.getEncryptDataTransfer();
   }
   
-  @InterfaceAudience.Private
-  public DataEncryptionKey getDataEncryptionKey()
-      throws IOException {
-    if (shouldEncryptData() && 
-        !this.trustedChannelResolver.isTrusted()) {
+  @Override
+  public DataEncryptionKey newDataEncryptionKey() throws IOException {
+    if (shouldEncryptData()) {
       synchronized (this) {
         if (encryptionKey == null ||
             encryptionKey.expiryDate < Time.now()) {
@@ -1859,22 +1852,17 @@ public class DFSClient implements java.i
   }
 
   /**
-   * Get the checksum of the whole file or a range of the file.
+   * Get the checksum of the whole file of a range of the file. Note that the
+   * range always starts from the beginning of the file.
    * @param src The file path
    * @param length the length of the range, i.e., the range is [0, length]
-   * @param clientName the name of the client requesting the checksum.
-   * @param namenode the RPC proxy for the namenode
-   * @param socketFactory to create sockets to connect to DNs
-   * @param socketTimeout timeout to use when connecting and waiting for a response
-   * @param encryptionKey the key needed to communicate with DNs in this cluster
-   * @param connectToDnViaHostname whether the client should use hostnames instead of IPs
    * @return The checksum 
+   * @see DistributedFileSystem#getFileChecksum(Path)
    */
-  private static MD5MD5CRC32FileChecksum getFileChecksum(String src,
-      long length, String clientName, ClientProtocol namenode,
-      SocketFactory socketFactory, int socketTimeout,
-      DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
+  public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)
       throws IOException {
+    checkOpen();
+    Preconditions.checkArgument(length >= 0);
     //get block locations for the file range
     LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0,
         length);
@@ -1909,7 +1897,7 @@ public class DFSClient implements java.i
       final DatanodeInfo[] datanodes = lb.getLocations();
       
       //try each datanode location of the block
-      final int timeout = 3000 * datanodes.length + socketTimeout;
+      final int timeout = 3000 * datanodes.length + dfsClientConf.socketTimeout;
       boolean done = false;
       for(int j = 0; !done && j < datanodes.length; j++) {
         DataOutputStream out = null;
@@ -1917,8 +1905,7 @@ public class DFSClient implements java.i
         
         try {
           //connect to a datanode
-          IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
-              encryptionKey, datanodes[j], timeout);
+          IOStreamPair pair = connectToDN(datanodes[j], timeout, lb);
           out = new DataOutputStream(new BufferedOutputStream(pair.out,
               HdfsConstants.SMALL_BUFFER_SIZE));
           in = new DataInputStream(pair.in);
@@ -1974,9 +1961,7 @@ public class DFSClient implements java.i
           } else {
             LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
                       "inferring checksum by reading first byte");
-            ct = inferChecksumTypeByReading(
-                clientName, socketFactory, socketTimeout, lb, datanodes[j],
-                encryptionKey, connectToDnViaHostname);
+            ct = inferChecksumTypeByReading(lb, datanodes[j]);
           }
 
           if (i == 0) { // first block
@@ -2050,16 +2035,13 @@ public class DFSClient implements java.i
    * Connect to the given datanode's datantrasfer port, and return
    * the resulting IOStreamPair. This includes encryption wrapping, etc.
    */
-  private static IOStreamPair connectToDN(
-      SocketFactory socketFactory, boolean connectToDnViaHostname,
-      DataEncryptionKey encryptionKey, DatanodeInfo dn, int timeout)
-      throws IOException
-  {
+  private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
+      LocatedBlock lb) throws IOException {
     boolean success = false;
     Socket sock = null;
     try {
       sock = socketFactory.createSocket();
-      String dnAddr = dn.getXferAddr(connectToDnViaHostname);
+      String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Connecting to datanode " + dnAddr);
       }
@@ -2068,13 +2050,8 @@ public class DFSClient implements java.i
   
       OutputStream unbufOut = NetUtils.getOutputStream(sock);
       InputStream unbufIn = NetUtils.getInputStream(sock);
-      IOStreamPair ret;
-      if (encryptionKey != null) {
-        ret = DataTransferEncryptor.getEncryptedStreams(
-                unbufOut, unbufIn, encryptionKey);
-      } else {
-        ret = new IOStreamPair(unbufIn, unbufOut);        
-      }
+      IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
+        lb.getBlockToken(), dn);
       success = true;
       return ret;
     } finally {
@@ -2090,21 +2067,14 @@ public class DFSClient implements java.i
    * with older HDFS versions which did not include the checksum type in
    * OpBlockChecksumResponseProto.
    *
-   * @param in input stream from datanode
-   * @param out output stream to datanode
    * @param lb the located block
-   * @param clientName the name of the DFSClient requesting the checksum
    * @param dn the connected datanode
    * @return the inferred checksum type
    * @throws IOException if an error occurs
    */
-  private static Type inferChecksumTypeByReading(
-      String clientName, SocketFactory socketFactory, int socketTimeout,
-      LocatedBlock lb, DatanodeInfo dn,
-      DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
+  private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
       throws IOException {
-    IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
-        encryptionKey, dn, socketTimeout);
+    IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);
 
     try {
      DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
@@ -2857,7 +2827,9 @@ public class DFSClient implements java.i
   }
 
   @Override // RemotePeerFactory
-  public Peer newConnectedPeer(InetSocketAddress addr) throws IOException {
+  public Peer newConnectedPeer(InetSocketAddress addr,
+      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
+      throws IOException {
     Peer peer = null;
     boolean success = false;
     Socket sock = null;
@@ -2866,8 +2838,8 @@ public class DFSClient implements java.i
       NetUtils.connect(sock, addr,
         getRandomLocalInterfaceAddr(),
         dfsClientConf.socketTimeout);
-      peer = TcpPeerServer.peerFromSocketAndKey(sock, 
-          getDataEncryptionKey());
+      peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
+          blockToken, datanodeId);
       success = true;
       return peer;
     } finally {
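
The refactored connectToDN above collapses the old explicit-encryption branch into a single SASL negotiation call. A minimal sketch of the resulting pattern, assuming the DFSClient members shown in the diff (socketFactory, saslClient, getConf()) plus a DatanodeInfo dn, a timeout, and the LocatedBlock lb:

    // Sketch only; mirrors the new connectToDN body. The connect and
    // setSoTimeout steps come from the method's unchanged context lines.
    Socket sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);
    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    // One call now covers plain, encrypted, and SASL-authenticated transports.
    IOStreamPair pair = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
        lb.getBlockToken(), dn);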

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri Jul 25 20:33:09 2014
@@ -214,6 +214,9 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version";
   public static final String  DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
 
+  public static final String DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK = "dfs.namenode.randomize-block-locations-per-block";
+  public static final boolean DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT = false;
+
   public static final String  DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
   public static final int     DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
 
@@ -561,6 +564,8 @@ public class DFSConfigKeys extends Commo
   public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
   public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm";
   public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
+  public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
+  public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class";
   
   // Journal-node related configs. These are read on the JN side.
   public static final String  DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir";

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Fri Jul 25 20:33:09 2014
@@ -61,7 +61,6 @@ import org.apache.hadoop.hdfs.protocol.N
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
@@ -314,6 +313,7 @@ public class DFSOutputStream extends FSO
     private DataInputStream blockReplyStream;
     private ResponseProcessor response = null;
     private volatile DatanodeInfo[] nodes = null; // list of targets for current block
+    private volatile StorageType[] storageTypes = null;
     private volatile String[] storageIDs = null;
     private final LoadingCache<DatanodeInfo, DatanodeInfo> excludedNodes =
         CacheBuilder.newBuilder()
@@ -418,10 +418,12 @@ public class DFSOutputStream extends FSO
     }
     
     private void setPipeline(LocatedBlock lb) {
-      setPipeline(lb.getLocations(), lb.getStorageIDs());
+      setPipeline(lb.getLocations(), lb.getStorageTypes(), lb.getStorageIDs());
     }
-    private void setPipeline(DatanodeInfo[] nodes, String[] storageIDs) {
+    private void setPipeline(DatanodeInfo[] nodes, StorageType[] storageTypes,
+        String[] storageIDs) {
       this.nodes = nodes;
+      this.storageTypes = storageTypes;
       this.storageIDs = storageIDs;
     }
 
@@ -447,7 +449,7 @@ public class DFSOutputStream extends FSO
       this.setName("DataStreamer for file " + src);
       closeResponder();
       closeStream();
-      setPipeline(null, null);
+      setPipeline(null, null, null);
       stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
     }
     
@@ -1032,10 +1034,12 @@ public class DFSOutputStream extends FSO
       //transfer replica
       final DatanodeInfo src = d == 0? nodes[1]: nodes[d - 1];
       final DatanodeInfo[] targets = {nodes[d]};
-      transfer(src, targets, lb.getBlockToken());
+      final StorageType[] targetStorageTypes = {storageTypes[d]};
+      transfer(src, targets, targetStorageTypes, lb.getBlockToken());
     }
 
     private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets,
+        final StorageType[] targetStorageTypes,
         final Token<BlockTokenIdentifier> blockToken) throws IOException {
       //transfer replica to the new datanode
       Socket sock = null;
@@ -1047,21 +1051,17 @@ public class DFSOutputStream extends FSO
         
         OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
         InputStream unbufIn = NetUtils.getInputStream(sock);
-        if (dfsClient.shouldEncryptData() &&
-            !dfsClient.trustedChannelResolver.isTrusted(sock.getInetAddress())) {
-          IOStreamPair encryptedStreams =
-              DataTransferEncryptor.getEncryptedStreams(
-                  unbufOut, unbufIn, dfsClient.getDataEncryptionKey());
-          unbufOut = encryptedStreams.out;
-          unbufIn = encryptedStreams.in;
-        }
+        IOStreamPair saslStreams = dfsClient.saslClient.socketSend(sock,
+          unbufOut, unbufIn, dfsClient, blockToken, src);
+        unbufOut = saslStreams.out;
+        unbufIn = saslStreams.in;
         out = new DataOutputStream(new BufferedOutputStream(unbufOut,
             HdfsConstants.SMALL_BUFFER_SIZE));
         in = new DataInputStream(unbufIn);
 
         //send the TRANSFER_BLOCK request
         new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
-            targets);
+            targets, targetStorageTypes);
         out.flush();
 
         //ack
@@ -1140,16 +1140,15 @@ public class DFSOutputStream extends FSO
           failed.add(nodes[errorIndex]);
 
           DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1];
-          System.arraycopy(nodes, 0, newnodes, 0, errorIndex);
-          System.arraycopy(nodes, errorIndex+1, newnodes, errorIndex,
-              newnodes.length-errorIndex);
+          arraycopy(nodes, newnodes, errorIndex);
+
+          final StorageType[] newStorageTypes = new StorageType[newnodes.length];
+          arraycopy(storageTypes, newStorageTypes, errorIndex);
 
           final String[] newStorageIDs = new String[newnodes.length];
-          System.arraycopy(storageIDs, 0, newStorageIDs, 0, errorIndex);
-          System.arraycopy(storageIDs, errorIndex+1, newStorageIDs, errorIndex,
-              newStorageIDs.length-errorIndex);
+          arraycopy(storageIDs, newStorageIDs, errorIndex);
           
-          setPipeline(newnodes, newStorageIDs);
+          setPipeline(newnodes, newStorageTypes, newStorageIDs);
 
           // Just took care of a node error while waiting for a node restart
           if (restartingNodeIndex >= 0) {
@@ -1186,7 +1185,7 @@ public class DFSOutputStream extends FSO
         
         // set up the pipeline again with the remaining nodes
         if (failPacket) { // for testing
-          success = createBlockOutputStream(nodes, newGS, isRecovery);
+          success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
           failPacket = false;
           try {
             // Give DNs time to send in bad reports. In real situations,
@@ -1195,7 +1194,7 @@ public class DFSOutputStream extends FSO
             Thread.sleep(2000);
           } catch (InterruptedException ie) {}
         } else {
-          success = createBlockOutputStream(nodes, newGS, isRecovery);
+          success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
         }
 
         if (restartingNodeIndex >= 0) {
@@ -1247,6 +1246,7 @@ public class DFSOutputStream extends FSO
     private LocatedBlock nextBlockOutputStream() throws IOException {
       LocatedBlock lb = null;
       DatanodeInfo[] nodes = null;
+      StorageType[] storageTypes = null;
       int count = dfsClient.getConf().nBlockWriteRetry;
       boolean success = false;
       ExtendedBlock oldBlock = block;
@@ -1269,11 +1269,12 @@ public class DFSOutputStream extends FSO
         bytesSent = 0;
         accessToken = lb.getBlockToken();
         nodes = lb.getLocations();
+        storageTypes = lb.getStorageTypes();
 
         //
         // Connect to first DataNode in the list.
         //
-        success = createBlockOutputStream(nodes, 0L, false);
+        success = createBlockOutputStream(nodes, storageTypes, 0L, false);
 
         if (!success) {
           DFSClient.LOG.info("Abandoning " + block);
@@ -1294,8 +1295,8 @@ public class DFSOutputStream extends FSO
     // connects to the first datanode in the pipeline
     // Returns true if success, otherwise return failure.
     //
-    private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS,
-        boolean recoveryFlag) {
+    private boolean createBlockOutputStream(DatanodeInfo[] nodes,
+        StorageType[] nodeStorageTypes, long newGS, boolean recoveryFlag) {
       if (nodes.length == 0) {
         DFSClient.LOG.info("nodes are empty for write pipeline of block "
             + block);
@@ -1325,14 +1326,10 @@ public class DFSOutputStream extends FSO
           
           OutputStream unbufOut = NetUtils.getOutputStream(s, writeTimeout);
           InputStream unbufIn = NetUtils.getInputStream(s);
-          if (dfsClient.shouldEncryptData()  && 
-              !dfsClient.trustedChannelResolver.isTrusted(s.getInetAddress())) {
-            IOStreamPair encryptedStreams =
-                DataTransferEncryptor.getEncryptedStreams(unbufOut,
-                    unbufIn, dfsClient.getDataEncryptionKey());
-            unbufOut = encryptedStreams.out;
-            unbufIn = encryptedStreams.in;
-          }
+          IOStreamPair saslStreams = dfsClient.saslClient.socketSend(s,
+            unbufOut, unbufIn, dfsClient, accessToken, nodes[0]);
+          unbufOut = saslStreams.out;
+          unbufIn = saslStreams.in;
           out = new DataOutputStream(new BufferedOutputStream(unbufOut,
               HdfsConstants.SMALL_BUFFER_SIZE));
           blockReplyStream = new DataInputStream(unbufIn);
@@ -1341,9 +1338,10 @@ public class DFSOutputStream extends FSO
           // Xmit header info to datanode
           //
   
+          BlockConstructionStage bcs = recoveryFlag? stage.getRecoveryStage(): stage;
           // send the request
-          new Sender(out).writeBlock(block, accessToken, dfsClient.clientName,
-              nodes, null, recoveryFlag? stage.getRecoveryStage() : stage, 
+          new Sender(out).writeBlock(block, nodeStorageTypes[0], accessToken,
+              dfsClient.clientName, nodes, nodeStorageTypes, null, bcs, 
               nodes.length, block.getNumBytes(), bytesSent, newGS, checksum,
               cachingStrategy.get());
   
@@ -2206,4 +2204,9 @@ public class DFSOutputStream extends FSO
   public long getFileId() {
     return fileId;
   }
+
+  private static <T> void arraycopy(T[] srcs, T[] dsts, int skipIndex) {
+    System.arraycopy(srcs, 0, dsts, 0, skipIndex);
+    System.arraycopy(srcs, skipIndex+1, dsts, skipIndex, dsts.length-skipIndex);
+  }
 }
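
The new private arraycopy helper folds the three copy-around-the-failed-node sequences in the pipeline-recovery path into one generic routine. A standalone sketch of its behavior:

    import java.util.Arrays;

    public class ArrayCopyDemo {
      // Same body as the helper added above: copy every element except skipIndex.
      private static <T> void arraycopy(T[] srcs, T[] dsts, int skipIndex) {
        System.arraycopy(srcs, 0, dsts, 0, skipIndex);
        System.arraycopy(srcs, skipIndex + 1, dsts, skipIndex,
            dsts.length - skipIndex);
      }

      public static void main(String[] args) {
        String[] nodes = {"dn1", "dn2", "dn3"};
        String[] remaining = new String[nodes.length - 1];
        arraycopy(nodes, remaining, 1);                  // drop failed node at index 1
        System.out.println(Arrays.toString(remaining)); // [dn1, dn3]
      }
    }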

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java Fri Jul 25 20:33:09 2014
@@ -26,7 +26,6 @@ import static org.apache.hadoop.hdfs.pro
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -38,14 +37,13 @@ import org.apache.hadoop.HadoopIllegalAr
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -259,12 +257,11 @@ public class HAUtil {
   /**
    * Parse the file system URI out of the provided token.
    */
-  public static URI getServiceUriFromToken(final String scheme,
-                                           Token<?> token) {
+  public static URI getServiceUriFromToken(final String scheme, Token<?> token) {
     String tokStr = token.getService().toString();
-
-    if (tokStr.startsWith(HA_DT_SERVICE_PREFIX)) {
-      tokStr = tokStr.replaceFirst(HA_DT_SERVICE_PREFIX, "");
+    final String prefix = buildTokenServicePrefixForLogicalUri(scheme);
+    if (tokStr.startsWith(prefix)) {
+      tokStr = tokStr.replaceFirst(prefix, "");
     }
     return URI.create(scheme + "://" + tokStr);
   }
@@ -273,10 +270,13 @@ public class HAUtil {
    * Get the service name used in the delegation token for the given logical
    * HA service.
    * @param uri the logical URI of the cluster
+   * @param scheme the scheme of the corresponding FileSystem
    * @return the service name
    */
-  public static Text buildTokenServiceForLogicalUri(URI uri) {
-    return new Text(HA_DT_SERVICE_PREFIX + uri.getHost());
+  public static Text buildTokenServiceForLogicalUri(final URI uri,
+      final String scheme) {
+    return new Text(buildTokenServicePrefixForLogicalUri(scheme)
+        + uri.getHost());
   }
   
   /**
@@ -286,7 +286,11 @@ public class HAUtil {
   public static boolean isTokenForLogicalUri(Token<?> token) {
     return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
   }
-  
+
+  public static String buildTokenServicePrefixForLogicalUri(String scheme) {
+    return HA_DT_SERVICE_PREFIX + scheme + ":";
+  }
+
   /**
    * Locate a delegation token associated with the given HA cluster URI, and if
    * one is found, clone it to also represent the underlying namenode address.
@@ -298,7 +302,9 @@ public class HAUtil {
   public static void cloneDelegationTokenForLogicalUri(
       UserGroupInformation ugi, URI haUri,
       Collection<InetSocketAddress> nnAddrs) {
-    Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri);
+    // this cloning logic is only used by hdfs
+    Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
+        HdfsConstants.HDFS_URI_SCHEME);
     Token<DelegationTokenIdentifier> haToken =
         tokenSelector.selectToken(haService, ugi.getTokens());
     if (haToken != null) {
@@ -309,8 +315,9 @@ public class HAUtil {
         Token<DelegationTokenIdentifier> specificToken =
             new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
         SecurityUtil.setTokenService(specificToken, singleNNAddr);
-        Text alias =
-            new Text(HA_DT_SERVICE_PREFIX + "//" + specificToken.getService());
+        Text alias = new Text(
+            buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
+                + "//" + specificToken.getService());
         ugi.addToken(alias, specificToken);
         LOG.debug("Mapped HA service delegation token for logical URI " +
             haUri + " to namenode " + singleNNAddr);

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java Fri Jul 25 20:33:09 2014
@@ -163,7 +163,8 @@ public class NameNodeProxies {
 
       Text dtService;
       if (failoverProxyProvider.useLogicalURI()) {
-        dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
+        dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
+            HdfsConstants.HDFS_URI_SCHEME);
       } else {
         dtService = SecurityUtil.buildTokenService(
             NameNode.getAddress(nameNodeUri));
@@ -224,7 +225,8 @@ public class NameNodeProxies {
           new Class[] { xface }, dummyHandler);
       Text dtService;
       if (failoverProxyProvider.useLogicalURI()) {
-        dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
+        dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
+            HdfsConstants.HDFS_URI_SCHEME);
       } else {
         dtService = SecurityUtil.buildTokenService(
             NameNode.getAddress(nameNodeUri));
@@ -333,19 +335,18 @@ public class NameNodeProxies {
         address, conf, ugi, NamenodeProtocolPB.class, 0);
     if (withRetries) { // create the proxy with retries
       RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
-          TimeUnit.MILLISECONDS);
-      Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap 
-                     = new HashMap<Class<? extends Exception>, RetryPolicy>();
-      RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy,
-          exceptionToPolicyMap);
-      Map<String, RetryPolicy> methodNameToPolicyMap 
-                     = new HashMap<String, RetryPolicy>();
-      methodNameToPolicyMap.put("getBlocks", methodPolicy);
-      methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
-      proxy = (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
-          proxy, methodNameToPolicyMap);
+              TimeUnit.MILLISECONDS);
+      Map<String, RetryPolicy> methodNameToPolicyMap
+           = new HashMap<String, RetryPolicy>();
+      methodNameToPolicyMap.put("getBlocks", timeoutPolicy);
+      methodNameToPolicyMap.put("getAccessKeys", timeoutPolicy);
+      NamenodeProtocol translatorProxy =
+          new NamenodeProtocolTranslatorPB(proxy);
+      return (NamenodeProtocol) RetryProxy.create(
+          NamenodeProtocol.class, translatorProxy, methodNameToPolicyMap);
+    } else {
+      return new NamenodeProtocolTranslatorPB(proxy);
     }
-    return new NamenodeProtocolTranslatorPB(proxy);
   }
   
   private static ClientProtocol createNNProxyWithClientProtocol(
@@ -379,29 +380,27 @@ public class NameNodeProxies {
                  = new HashMap<Class<? extends Exception>, RetryPolicy>();
       remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
           createPolicy);
-    
-      Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
-                 = new HashMap<Class<? extends Exception>, RetryPolicy>();
-      exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
-          .retryByRemoteException(defaultPolicy,
-              remoteExceptionToPolicyMap));
-      RetryPolicy methodPolicy = RetryPolicies.retryByException(
-          defaultPolicy, exceptionToPolicyMap);
+
+      RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
+          defaultPolicy, remoteExceptionToPolicyMap);
       Map<String, RetryPolicy> methodNameToPolicyMap 
                  = new HashMap<String, RetryPolicy>();
     
       methodNameToPolicyMap.put("create", methodPolicy);
-    
-      proxy = (ClientNamenodeProtocolPB) RetryProxy.create(
-          ClientNamenodeProtocolPB.class,
-          new DefaultFailoverProxyProvider<ClientNamenodeProtocolPB>(
-              ClientNamenodeProtocolPB.class, proxy),
+
+      ClientProtocol translatorProxy =
+        new ClientNamenodeProtocolTranslatorPB(proxy);
+      return (ClientProtocol) RetryProxy.create(
+          ClientProtocol.class,
+          new DefaultFailoverProxyProvider<ClientProtocol>(
+              ClientProtocol.class, translatorProxy),
           methodNameToPolicyMap,
           defaultPolicy);
+    } else {
+      return new ClientNamenodeProtocolTranslatorPB(proxy);
     }
-    return new ClientNamenodeProtocolTranslatorPB(proxy);
   }
-  
+
   private static Object createNameNodeProxy(InetSocketAddress address,
       Configuration conf, UserGroupInformation ugi, Class<?> xface,
       int rpcTimeout) throws IOException {
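
In both factory methods the retry proxy now wraps the protocol translator rather than the raw protobuf proxy, and the no-retry branch returns the translator directly. Callers are unaffected; a hypothetical usage sketch:

    // Sketch, assuming the existing createProxy entry point; the returned
    // ClientProtocol proxy is already retry- and failover-aware.
    NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            ClientProtocol.class);
    ClientProtocol namenode = info.getProxy();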

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java Fri Jul 25 20:33:09 2014
@@ -21,15 +21,21 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 
 import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.security.token.Token;
 
 public interface RemotePeerFactory {
   /**
    * @param addr          The address to connect to.
-   * 
+   * @param blockToken    Token used during optional SASL negotiation
+   * @param datanodeId    ID of destination DataNode
    * @return              A new Peer connected to the address.
    *
    * @throws IOException  If there was an error connecting or creating 
    *                      the remote socket, encrypted stream, etc.
    */
-  Peer newConnectedPeer(InetSocketAddress addr) throws IOException;
+  Peer newConnectedPeer(InetSocketAddress addr,
+      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
+      throws IOException;
 }
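
Implementations now thread the block token and target DataNode ID into the SASL handshake. A hedged sketch modeled on DFSClient's override earlier in this commit (socketFactory, saslClient, keyFactory, and socketTimeout are assumed fields of the enclosing class):

    @Override
    public Peer newConnectedPeer(InetSocketAddress addr,
        Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
        throws IOException {
      Socket sock = socketFactory.createSocket();
      NetUtils.connect(sock, addr, socketTimeout);
      // peerFromSocketAndKey (see TcpPeerServer below) runs the SASL
      // negotiation and wraps the streams when protection is configured.
      return TcpPeerServer.peerFromSocketAndKey(saslClient, sock, keyFactory,
          blockToken, datanodeId);
    }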

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java Fri Jul 25 20:33:09 2014
@@ -19,9 +19,7 @@ package org.apache.hadoop.hdfs.net;
 
 import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.net.unix.DomainSocket;
 
 import java.io.InputStream;
@@ -51,11 +49,8 @@ public class EncryptedPeer implements Pe
    */
   private final ReadableByteChannel channel;
 
-  public EncryptedPeer(Peer enclosedPeer, DataEncryptionKey key)
-      throws IOException {
+  public EncryptedPeer(Peer enclosedPeer, IOStreamPair ios) {
     this.enclosedPeer = enclosedPeer;
-    IOStreamPair ios = DataTransferEncryptor.getEncryptedStreams(
-        enclosedPeer.getOutputStream(), enclosedPeer.getInputStream(), key);
     this.in = ios.in;
     this.out = ios.out;
     this.channel = ios.in instanceof ReadableByteChannel ? 
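
EncryptedPeer no longer performs the negotiation itself; it simply wraps streams that the SASL layer has already set up. A sketch of the new construction path, assuming a plain Peer and a negotiated IOStreamPair ios:

    // The caller (e.g. SaslDataTransferClient.peerSend) negotiates first,
    // then wraps the peer with the resulting stream pair.
    Peer securePeer = new EncryptedPeer(plainPeer, ios);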

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java Fri Jul 25 20:33:09 2014
@@ -28,10 +28,14 @@ import java.nio.channels.SocketChannel;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.security.token.Token;
 
 @InterfaceAudience.Private
 public class TcpPeerServer implements PeerServer {
@@ -74,15 +78,16 @@ public class TcpPeerServer implements Pe
     }
   }
 
-  public static Peer peerFromSocketAndKey(Socket s,
-        DataEncryptionKey key) throws IOException {
+  public static Peer peerFromSocketAndKey(
+        SaslDataTransferClient saslClient, Socket s,
+        DataEncryptionKeyFactory keyFactory,
+        Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
+        throws IOException {
     Peer peer = null;
     boolean success = false;
     try {
-      peer = peerFromSocket(s); 
-      if (key != null) {
-        peer = new EncryptedPeer(peer, key);
-      }
+      peer = peerFromSocket(s);
+      peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
       success = true;
       return peer;
     } finally {

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Fri Jul 25 20:33:09 2014
@@ -1337,6 +1337,6 @@ public interface ClientProtocol {
    * @param xAttr <code>XAttr</code> to remove
    * @throws IOException
    */
-  @Idempotent
+  @AtMostOnce
   public void removeXAttr(String src, XAttr xAttr) throws IOException;
 }
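
removeXAttr is retagged @AtMostOnce because a blind client retry of a non-idempotent call can fail even though the first attempt succeeded; the annotation routes retries through the server-side retry cache instead. A sketch of the failure mode this avoids:

    // Without the retry cache, a timed-out-but-applied call that the
    // client retries would surface a spurious error:
    client.removeXAttr("/file", xattr);  // server applies it; reply is lost
    client.removeXAttr("/file", xattr);  // retry: xattr already gone -> error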

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Fri Jul 25 20:33:09 2014
@@ -339,7 +339,7 @@ public class DatanodeInfo extends Datano
     buffer.append("Cache Remaining: " +cr+ " 
("+StringUtils.byteDesc(cr)+")"+"\n");
     buffer.append("Cache Used%: "+percent2String(cacheUsedPercent) + "\n");
     buffer.append("Cache Remaining%: "+percent2String(cacheRemainingPercent) + 
"\n");
-
+    buffer.append("Xceivers: "+getXceiverCount()+"\n");
     buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
     return buffer.toString();
   }

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java Fri Jul 25 20:33:09 2014
@@ -124,7 +124,7 @@ public class HdfsConstants {
   * of a delegation token, indicating that the URI is a logical (HA)
    * URI.
    */
-  public static final String HA_DT_SERVICE_PREFIX = "ha-hdfs:";
+  public static final String HA_DT_SERVICE_PREFIX = "ha-";
 
 
   /**

