Author: szetszwo
Date: Tue Jul 29 00:49:14 2014
New Revision: 1614234

URL: http://svn.apache.org/r1614234
Log:
Merge r1609845 through r1614231 from trunk.
Added:
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
      - copied unchanged from r1614231, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz
      - copied unchanged from r1614231, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz
      - copied unchanged from r1614231, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz

Removed:
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java

Modified:
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
    hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml

Propchange: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1612881-1614231

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java Tue Jul 29 00:49:14 2014
@@ -104,6 +104,10 @@ public class RpcProgramMountd extends Rp
   @Override
   public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
+    if (hostsMatcher == null) {
+      return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
+          null);
+    }
     AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
     if (accessPrivilege == AccessPrivilege.NONE) {
       return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
@@ -208,16 +212,23 @@ public class RpcProgramMountd extends Rp
     } else if (mntproc == MNTPROC.UMNTALL) {
       umntall(out, xid, client);
     } else if (mntproc == MNTPROC.EXPORT) {
-      // Currently only support one NFS export 
+      // Currently only support one NFS export
       List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
-      hostsMatchers.add(hostsMatcher);
-      out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      if (hostsMatcher != null) {
+        hostsMatchers.add(hostsMatcher);
+        out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      } else {
+        // This means there are no valid exports provided.
+        RpcAcceptedReply.getInstance(xid,
+            RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
+            out);
+      }
     } else {
       // Invalid procedure
       RpcAcceptedReply.getInstance(xid,
           RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
           out);
-    } 
+    }
     ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
     RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
     RpcUtil.sendRpcResponse(ctx, rsp);

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Tue Jul 29 00:49:14 2014
@@ -2123,8 +2123,11 @@ public class RpcProgramNfs3 extends RpcP
     if (!doPortMonitoring(remoteAddress)) {
       return false;
     }
-    
+    // Check export table
+    if (exports == null) {
+      return false;
+    }
     InetAddress client = ((InetSocketAddress) remoteAddress).getAddress();
     AccessPrivilege access = exports.getAccessPrivilege(client);
     if (access == AccessPrivilege.NONE) {
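Both NFS-gateway hunks above serve one purpose: when no valid export table is configured, the gateway now fails closed (access denied, or PROC_UNAVAIL for EXPORT) instead of dereferencing a null matcher. A minimal, self-contained sketch of that guard pattern, using simplified stand-in types rather than the real NfsExports/AccessPrivilege classes:

    import java.net.InetAddress;

    public class ExportGuardSketch {
      enum AccessPrivilege { NONE, READ_ONLY, READ_WRITE }

      /** Simplified stand-in for the gateway's export table. */
      interface Exports {
        AccessPrivilege getAccessPrivilege(InetAddress client);
      }

      /** Mirrors the patched check: a null export table always denies access. */
      static boolean checkAccessPrivilege(Exports exports, InetAddress client) {
        if (exports == null) {
          return false;                 // the new guard from the hunks above
        }
        return exports.getAccessPrivilege(client) != AccessPrivilege.NONE;
      }

      public static void main(String[] args) {
        InetAddress client = InetAddress.getLoopbackAddress();
        System.out.println(checkAccessPrivilege(null, client));                       // false
        System.out.println(checkAccessPrivilege(c -> AccessPrivilege.READ_ONLY, client)); // true
      }
    }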
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Jul 29 00:49:14 2014
@@ -272,6 +272,9 @@ Trunk (Unreleased)
     HDFS-5794. Fix the inconsistency of layout version number of
     ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
 
+    HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI.
+    (Vinayakumar B via wheat9)
+
 Release 2.6.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -334,6 +337,15 @@ Release 2.6.0 - UNRELEASED
     HDFS-6701. Make seed optional in NetworkTopology#sortByDistance.
     (Ashwin Shankar via wang)
 
+    HDFS-6755. There is an unnecessary sleep in the code path where
+    DFSOutputStream#close gives up its attempt to contact the namenode
+    (mitdesai21 via cmccabe)
+
+    HDFS-6750. The DataNode should use its shared memory segment to mark
+    short-circuit replicas that have been unlinked as stale (cmccabe)
+
+    HDFS-6739. Add getDatanodeStorageReport to ClientProtocol. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -377,6 +389,25 @@ Release 2.6.0 - UNRELEASED
     HDFS-6731. Run "hdfs zkfc-formatZK" on a server in a non-namenode will
     cause a null pointer exception. (Masatake Iwasaki via brandonli)
 
+    HDFS-6114. Block Scan log rolling will never happen if blocks written
+    continuously leading to huge size of dncp_block_verification.log.curr
+    (vinayakumarb via cmccabe)
+
+    HDFS-6455. NFS: Exception should be added in NFS log for invalid separator in
+    nfs.exports.allowed.hosts. (Abhiraj Butala via brandonli)
+
+    HDFS-6715. Webhdfs wont fail over when it gets java.io.IOException: Namenode
+    is in startup mode. (jing9)
+
+    HDFS-5919. FileJournalManager doesn't purge empty and corrupt inprogress edits
+    files (vinayakumarb)
+
+    HDFS-6752. Avoid Address bind errors in TestDatanodeConfig#testMemlockLimit
+    (vinayakumarb)
+
+    HDFS-6749. FSNamesystem methods should call resolvePath.
+    (Charles Lamb via cnauroth)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -399,6 +430,15 @@ Release 2.5.0 - UNRELEASED
     HDFS-6406. Add capability for NFS gateway to reject connections from
     unprivileged ports. (atm)
 
+    HDFS-2006. Ability to support storing extended attributes per file.
+
+    HDFS-5978. Create a tool to take fsimage and expose read-only WebHDFS API.
+    (Akira Ajisaka via wheat9)
+
+    HDFS-6278. Create HTML5-based UI for SNN. (wheat9)
+
+    HDFS-6279. Create new index page for JN / DN. (wheat9)
+
   IMPROVEMENTS
 
     HDFS-6007. Update documentation about short-circuit local reads (iwasakims
@@ -416,9 +456,6 @@ Release 2.5.0 - UNRELEASED
 
     HDFS-6158. Clean up dead code for OfflineImageViewer. (wheat9)
 
-    HDFS-5978. Create a tool to take fsimage and expose read-only WebHDFS API.
-    (Akira Ajisaka via wheat9)
-
     HDFS-6164. Remove lsr in OfflineImageViewer. (wheat9)
 
     HDFS-6167. Relocate the non-public API classes in the hdfs.client package.
@@ -446,10 +483,6 @@ Release 2.5.0 - UNRELEASED
 
     HDFS-6265. Prepare HDFS codebase for JUnit 4.11. (cnauroth)
 
-    HDFS-6278. Create HTML5-based UI for SNN. (wheat9)
-
-    HDFS-6279. Create new index page for JN / DN. (wheat9)
-
     HDFS-5693. Few NN metrics data points were collected via JMX when NN
     is under heavy load. (Ming Ma via jing9)
@@ -821,9 +854,6 @@ Release 2.5.0 - UNRELEASED
     HDFS-6464. Support multiple xattr.name parameters for WebHDFS getXAttrs.
     (Yi Liu via umamahesh)
 
-    HDFS-6375. Listing extended attributes with the search permission.
-    (Charles Lamb via wang)
-
     HDFS-6539. test_native_mini_dfs is skipped in hadoop-hdfs/pom.xml
     (decstery via cmccabe)
@@ -912,6 +942,27 @@ Release 2.5.0 - UNRELEASED
     HDFS-6703. NFS: Files can be deleted from a read-only mount
     (Srikanth Upputuri via brandonli)
 
+    HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code
+    when xattr doesn't exist. (Charles Lamb via umamahesh)
+
+    HDFS-6696. Name node cannot start if the path of a file under
+    construction contains ".snapshot". (wang)
+
+    HDFS-6312. WebHdfs HA failover is broken on secure clusters.
+    (daryn via tucu)
+
+    HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes
+    from the tree and deleting them from the inode map (kihwal via cmccabe)
+
+    HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
+    via cmccabe)
+
+    HDFS-6723. New NN webUI no longer displays decommissioned state for dead node.
+    (Ming Ma via wheat9)
+
+    HDFS-6717. JIRA HDFS-5804 breaks default nfs-gateway behavior for unsecured config
+    (brandonli)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
@@ -981,15 +1032,6 @@ Release 2.5.0 - UNRELEASED
     HDFS-6492. Support create-time xattrs and atomically setting multiple
     xattrs. (wang)
 
-    HDFS-6312. WebHdfs HA failover is broken on secure clusters.
-    (daryn via tucu)
-
-    HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes
-    from the tree and deleting them from the inode map (kihwal via cmccabe)
-
-    HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
-    via cmccabe)
-
 Release 2.4.1 - 2014-06-23
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1612881-1614231

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java Tue Jul 29 00:49:14 2014
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.util.DataChecksum;
 
 import com.google.common.annotations.VisibleForTesting;

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java Tue Jul 29 00:49:14 2014
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Tue Jul 29 00:49:14 2014
@@ -32,19 +32,21 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
@@ -60,8 +62,6 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
 
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@@ -91,7 +91,6 @@ import java.util.concurrent.atomic.Atomi
 
 import javax.net.SocketFactory;
 
-import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -112,22 +111,22 @@ import org.apache.hadoop.fs.MD5MD5CRC32C
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
 import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.VolumeId;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
+import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
@@ -158,8 +157,8 @@ import org.apache.hadoop.hdfs.protocol.U
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
-import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
@@ -175,6 +174,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
@@ -200,6 +200,7 @@ import org.apache.hadoop.util.Time;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import com.google.common.net.InetAddresses;
 
 /********************************************************
@@ -2192,6 +2193,11 @@ public class DFSClient implements java.i
     return namenode.getDatanodeReport(type);
   }
 
+  public DatanodeStorageReport[] getDatanodeStorageReport(
+      DatanodeReportType type) throws IOException {
+    return namenode.getDatanodeStorageReport(type);
+  }
+
   /**
    * Enter, leave or get safe mode.
    * 
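For context, a hypothetical caller of the DFSClient method added above. It assumes an already-constructed DFSClient; the DatanodeStorageReport accessors match the ones exercised by the PBHelper hunks later in this commit, while getRemaining() and getHostName() are the standard StorageReport/DatanodeInfo accessors, not part of this diff:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
    import org.apache.hadoop.hdfs.server.protocol.StorageReport;

    class StorageReportExample {
      /** Print one line per storage on every live datanode. */
      static void printStorageReports(DFSClient client) throws IOException {
        DatanodeStorageReport[] reports =
            client.getDatanodeStorageReport(DatanodeReportType.LIVE);
        for (DatanodeStorageReport r : reports) {
          for (StorageReport s : r.getStorageReports()) {
            System.out.println(r.getDatanodeInfo().getHostName()
                + " remaining=" + s.getRemaining());
          }
        }
      }
    }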
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Tue Jul 29 00:49:14 2014
@@ -2136,12 +2136,12 @@ public class DFSOutputStream extends FSO
         throw new IOException(msg);
       }
       try {
-        Thread.sleep(localTimeout);
         if (retries == 0) {
           throw new IOException("Unable to close file because the last block"
               + " does not have enough number of replicas.");
         }
         retries--;
+        Thread.sleep(localTimeout);
        localTimeout *= 2;
        if (Time.now() - localstart > 5000) {
          DFSClient.LOG.info("Could not complete " + src + " retrying...");
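This reordering is the entire HDFS-6755 fix: the retry budget is now checked before sleeping, so the final IOException is thrown immediately instead of after one last, useless backoff. The corrected loop shape in isolation, where tryComplete() is a hypothetical stand-in for the namenode complete() call:

    import java.io.IOException;

    class CompleteRetrySketch {
      interface Attempt { boolean tryComplete(); }  // stand-in for namenode.complete(...)

      static void completeWithRetries(Attempt attempt, int retries, long timeoutMs)
          throws IOException, InterruptedException {
        while (!attempt.tryComplete()) {
          if (retries == 0) {
            // fail immediately; sleeping first would only delay the error
            throw new IOException("Unable to close file because the last block"
                + " does not have enough number of replicas.");
          }
          retries--;
          Thread.sleep(timeoutMs);   // back off only when another attempt follows
          timeoutMs *= 2;            // exponential backoff, as in DFSOutputStream
        }
      }
    }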
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Tue Jul 29 00:49:14 2014
@@ -24,6 +24,7 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -31,11 +32,10 @@ import org.apache.hadoop.fs.FileAlreadyE
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.AtMostOnce;
@@ -655,6 +656,13 @@ public interface ClientProtocol {
       throws IOException;
 
   /**
+   * Get a report on the current datanode storages.
+   */
+  @Idempotent
+  public DatanodeStorageReport[] getDatanodeStorageReport(
+      HdfsConstants.DatanodeReportType type) throws IOException;
+
+  /**
    * Get the block size for the given file.
    * @param filename The name of the file
    * @return The number of bytes in each block
@@ -1337,6 +1345,6 @@ public interface ClientProtocol {
    * @param xAttr <code>XAttr</code> to remove
    * @throws IOException
    */
-  @Idempotent
+  @AtMostOnce
   public void removeXAttr(String src, XAttr xAttr) throws IOException;
 }

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java Tue Jul 29 00:49:14 2014
@@ -27,7 +27,7 @@ import java.nio.channels.ReadableByteCha
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 
 import com.google.common.base.Preconditions;

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Tue Jul 29 00:49:14 2014
@@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
@@ -93,6 +94,8 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@@ -174,7 +177,6 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
@@ -656,6 +658,21 @@ public class ClientNamenodeProtocolServe
   }
 
   @Override
+  public GetDatanodeStorageReportResponseProto getDatanodeStorageReport(
+      RpcController controller, GetDatanodeStorageReportRequestProto req)
+      throws ServiceException {
+    try {
+      List<DatanodeStorageReportProto> reports = PBHelper.convertDatanodeStorageReports(
+          server.getDatanodeStorageReport(PBHelper.convert(req.getType())));
+      return GetDatanodeStorageReportResponseProto.newBuilder()
+          .addAllDatanodeStorageReports(reports)
+          .build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public GetPreferredBlockSizeResponseProto getPreferredBlockSize(
       RpcController controller, GetPreferredBlockSizeRequestProto req)
       throws ServiceException {

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Tue Jul 29 00:49:14 2014
@@ -94,6 +94,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@@ -151,6 +152,7 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtobufHelper;
@@ -581,6 +583,20 @@ public class ClientNamenodeProtocolTrans
   }
 
   @Override
+  public DatanodeStorageReport[] getDatanodeStorageReport(DatanodeReportType type)
+      throws IOException {
+    final GetDatanodeStorageReportRequestProto req
+        = GetDatanodeStorageReportRequestProto.newBuilder()
+            .setType(PBHelper.convert(type)).build();
+    try {
+      return PBHelper.convertDatanodeStorageReports(
+          rpcProxy.getDatanodeStorageReport(null, req).getDatanodeStorageReportsList());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
   public long getPreferredBlockSize(String filename) throws IOException,
       UnresolvedLinkException {
     GetPreferredBlockSizeRequestProto req = GetPreferredBlockSizeRequestProto

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java Tue Jul 29 00:49:14 2014
@@ -21,18 +21,13 @@ package org.apache.hadoop.hdfs.protocolP
 import java.io.Closeable;
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto;
@@ -51,7 +46,6 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -61,14 +55,10 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -137,9 +127,7 @@ public class DatanodeProtocolClientSideT
         .setRegistration(PBHelper.convert(registration))
         .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
         .setFailedVolumes(failedVolumes);
-    for (StorageReport r : reports) {
-      builder.addReports(PBHelper.convert(r));
-    }
+    builder.addAllReports(PBHelper.convertStorageReports(reports));
     if (cacheCapacity != 0) {
       builder.setCacheCapacity(cacheCapacity);
     }

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Tue Jul 29 00:49:14 2014
@@ -90,6 +90,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
@@ -102,14 +103,11 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -125,6 +123,8 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
@@ -149,6 +149,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
@@ -182,6 +183,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
@@ -620,6 +622,41 @@ public class PBHelper {
     return builder.build();
   }
 
+  public static DatanodeStorageReportProto convertDatanodeStorageReport(
+      DatanodeStorageReport report) {
+    return DatanodeStorageReportProto.newBuilder()
+        .setDatanodeInfo(convert(report.getDatanodeInfo()))
+        .addAllStorageReports(convertStorageReports(report.getStorageReports()))
+        .build();
+  }
+
+  public static List<DatanodeStorageReportProto> convertDatanodeStorageReports(
+      DatanodeStorageReport[] reports) {
+    final List<DatanodeStorageReportProto> protos
+        = new ArrayList<DatanodeStorageReportProto>(reports.length);
+    for(int i = 0; i < reports.length; i++) {
+      protos.add(convertDatanodeStorageReport(reports[i]));
+    }
+    return protos;
+  }
+
+  public static DatanodeStorageReport convertDatanodeStorageReport(
+      DatanodeStorageReportProto proto) {
+    return new DatanodeStorageReport(
+        convert(proto.getDatanodeInfo()),
+        convertStorageReports(proto.getStorageReportsList()));
+  }
+
+  public static DatanodeStorageReport[] convertDatanodeStorageReports(
+      List<DatanodeStorageReportProto> protos) {
+    final DatanodeStorageReport[] reports
+        = new DatanodeStorageReport[protos.size()];
+    for(int i = 0; i < reports.length; i++) {
+      reports[i] = convertDatanodeStorageReport(protos.get(i));
+    }
+    return reports;
+  }
+
   public static AdminStates convert(AdminState adminState) {
     switch(adminState) {
     case DECOMMISSION_INPROGRESS:
@@ -1717,6 +1754,15 @@ public class PBHelper {
     return report;
   }
 
+  public static List<StorageReportProto> convertStorageReports(StorageReport[] storages) {
+    final List<StorageReportProto> protos = new ArrayList<StorageReportProto>(
+        storages.length);
+    for(int i = 0; i < storages.length; i++) {
+      protos.add(convert(storages[i]));
+    }
+    return protos;
+  }
+
   public static JournalInfo convert(JournalInfoProto info) {
     int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0;
     int nsID = info.hasNamespaceID() ? info.getNamespaceID() : 0;

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java Tue Jul 29 00:49:14 2014
@@ -259,6 +259,15 @@ public class DatanodeDescriptor extends
     }
   }
 
+  public StorageReport[] getStorageReports() {
+    final StorageReport[] reports = new StorageReport[storageMap.size()];
+    final DatanodeStorageInfo[] infos = getStorageInfos();
+    for(int i = 0; i < infos.length; i++) {
+      reports[i] = infos[i].toStorageReport();
+    }
+    return reports;
+  }
+
   boolean hasStaleStorages() {
     synchronized (storageMap) {
       for (DatanodeStorageInfo storage : storageMap.values()) {

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java Tue Jul 29 00:49:14 2014
@@ -291,6 +291,12 @@ public class DatanodeStorageInfo {
   public String toString() {
     return "[" + storageType + "]" + storageID + ":" + state;
   }
+
+  StorageReport toStorageReport() {
+    return new StorageReport(
+        new DatanodeStorage(storageID, state, storageType),
+        false, capacity, dfsUsed, remaining, blockPoolUsed);
+  }
 
   static Iterable<StorageType> toStorageTypes(
       final Iterable<DatanodeStorageInfo> infos) {
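The conversion helpers added to PBHelper above are symmetric by construction, so the report arrays that DatanodeDescriptor#getStorageReports assembles survive an array-to-proto-list-to-array round trip. A sketch using only method names taken from the hunks (the surrounding setup is assumed, not part of the patch):

    import java.util.List;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;

    class RoundTripSketch {
      static DatanodeStorageReport[] roundTrip(DatanodeStorageReport[] reports) {
        // array -> proto list, as on the NameNode when serving the RPC
        List<DatanodeStorageReportProto> protos =
            PBHelper.convertDatanodeStorageReports(reports);
        // proto list -> array, as on the client after the RPC
        return PBHelper.convertDatanodeStorageReports(protos);
      }
    }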
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Tue Jul 29 00:49:14 2014
@@ -84,6 +84,10 @@ class BlockPoolSliceScanner {
   private final SortedSet<BlockScanInfo> blockInfoSet
       = new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
+  private final SortedSet<BlockScanInfo> newBlockInfoSet =
+      new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
   private final GSet<Block, BlockScanInfo> blockMap
       = new LightWeightGSet<Block, BlockScanInfo>(
           LightWeightGSet.computeCapacity(0.5, "BlockMap"));
@@ -195,7 +199,7 @@ class BlockPoolSliceScanner {
       BlockScanInfo info = new BlockScanInfo( block );
       info.lastScanTime = scanTime--;
       //still keep 'info.lastScanType' to NONE.
-      addBlockInfo(info);
+      addBlockInfo(info, false);
     }
 
     RollingLogs rollingLogs = null;
@@ -221,25 +225,42 @@ class BlockPoolSliceScanner {
     // Should we change throttler bandwidth every time bytesLeft changes?
     // not really required.
   }
-  
-  private synchronized void addBlockInfo(BlockScanInfo info) {
-    boolean added = blockInfoSet.add(info);
+
+  /**
+   * Add the BlockScanInfo to sorted set of blockScanInfo
+   * @param info BlockScanInfo to be added
+   * @param isNewBlock true if the block is the new Block, false if
+   *          BlockScanInfo is being updated with new scanTime
+   */
+  private synchronized void addBlockInfo(BlockScanInfo info,
+      boolean isNewBlock) {
+    boolean added = false;
+    if (isNewBlock) {
+      // check whether the block already present
+      boolean exists = blockInfoSet.contains(info);
+      added = !exists && newBlockInfoSet.add(info);
+    } else {
+      added = blockInfoSet.add(info);
+    }
     blockMap.put(info);
     if (added) {
       updateBytesToScan(info.getNumBytes(), info.lastScanTime);
     }
   }
-  
+
   private synchronized void delBlockInfo(BlockScanInfo info) {
     boolean exists = blockInfoSet.remove(info);
+    if (!exists){
+      exists = newBlockInfoSet.remove(info);
+    }
     blockMap.remove(info);
     if (exists) {
       updateBytesToScan(-info.getNumBytes(), info.lastScanTime);
     }
   }
-  
+
   /** Update blockMap by the given LogEntry */
   private synchronized void updateBlockInfo(LogEntry e) {
     BlockScanInfo info = blockMap.get(new Block(e.blockId, 0, e.genStamp));
@@ -249,7 +270,7 @@ class BlockPoolSliceScanner {
       delBlockInfo(info);
       info.lastScanTime = e.verificationTime;
       info.lastScanType = ScanType.VERIFICATION_SCAN;
-      addBlockInfo(info);
+      addBlockInfo(info, false);
     }
   }
 
@@ -275,14 +296,14 @@ class BlockPoolSliceScanner {
     info = new BlockScanInfo(block.getLocalBlock());
     info.lastScanTime = getNewBlockScanTime();
-    addBlockInfo(info);
+    addBlockInfo(info, true);
     adjustThrottler();
   }
 
   /** Deletes the block from internal structures */
   synchronized void deleteBlock(Block block) {
     BlockScanInfo info = blockMap.get(block);
-    if ( info != null ) {
+    if (info != null) {
       delBlockInfo(info);
     }
   }
@@ -319,7 +340,7 @@ class BlockPoolSliceScanner {
     info.lastScanType = type;
     info.lastScanTime = now;
     info.lastScanOk = scanOk;
-    addBlockInfo(info);
+    addBlockInfo(info, false);
 
     // Don't update meta data if the verification failed.
     if (!scanOk) {
@@ -578,7 +599,7 @@ class BlockPoolSliceScanner {
           delBlockInfo(info);
           info.lastScanTime = lastScanTime;
           lastScanTime += verifyInterval;
-          addBlockInfo(info);
+          addBlockInfo(info, false);
         }
       }
     }
@@ -674,12 +695,21 @@ class BlockPoolSliceScanner {
       throw e;
     } finally {
       rollVerificationLogs();
+      rollNewBlocksInfo();
       if (LOG.isDebugEnabled()) {
        LOG.debug("Done scanning block pool: " + blockPoolId);
      }
    }
  }
-  
+
+  // add new blocks to scan in next iteration
+  private synchronized void rollNewBlocksInfo() {
+    for (BlockScanInfo newBlock : newBlockInfoSet) {
+      blockInfoSet.add(newBlock);
+    }
+    newBlockInfoSet.clear();
+  }
+
   private synchronized void rollVerificationLogs() {
     if (verificationLog != null) {
       try {
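The staging set introduced above is the heart of HDFS-6114: blocks added while a scan pass is in progress land in newBlockInfoSet and are merged into blockInfoSet only after the pass finishes, so a steady stream of new blocks can no longer keep the pass, and the log roll at its end, from completing. The same pattern in isolation, reduced to long block IDs instead of BlockScanInfo:

    import java.util.TreeSet;

    class ScanQueueSketch {
      private final TreeSet<Long> blockInfoSet = new TreeSet<Long>();    // scanned this pass
      private final TreeSet<Long> newBlockInfoSet = new TreeSet<Long>(); // staged for next pass

      synchronized void addBlockInfo(long id, boolean isNewBlock) {
        if (isNewBlock) {
          if (!blockInfoSet.contains(id)) {
            newBlockInfoSet.add(id);   // defer: do not extend the current pass
          }
        } else {
          blockInfoSet.add(id);        // rescheduling an already-tracked block
        }
      }

      /** Called once per finished pass, like rollNewBlocksInfo() above. */
      synchronized void rollNewBlocksInfo() {
        blockInfoSet.addAll(newBlockInfoSet);
        newBlockInfoSet.clear();
      }
    }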
+ append("Marking short-circuit slots as invalid: "); + for (Slot slot : affectedSlots) { + slot.makeInvalid(); + bld.append(prefix).append(slot.toString()); + prefix = ", "; + } + LOG.info(bld.toString()); + } + } + public static class NewShmInfo implements Closeable { public final ShmId shmId; public final FileInputStream stream; Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java?rev=1614234&r1=1614233&r2=1614234&view=diff ============================================================================== --- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (original) +++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java Tue Jul 29 00:49:14 2014 @@ -44,6 +44,7 @@ import org.apache.commons.logging.LogFac import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.ExtendedBlockId; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; @@ -1232,8 +1233,15 @@ class FsDatasetImpl implements FsDataset } volumeMap.remove(bpid, invalidBlks[i]); } + + // If a DFSClient has the replica in its cache of short-circuit file + // descriptors (and the client is using ShortCircuitShm), invalidate it. + datanode.getShortCircuitRegistry().processBlockInvalidation( + new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid)); + // If the block is cached, start uncaching it. cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId()); + // Delete the block asynchronously to make sure we can do it fast enough. // It's ok to unlink the block file before the uncache operation // finishes. 
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Tue Jul 29 00:49:14 2014
@@ -1074,10 +1074,11 @@ public class FSEditLog implements LogsPu
     logEdit(op);
   }
 
-  void logRemoveXAttrs(String src, List<XAttr> xAttrs) {
+  void logRemoveXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
     final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
     op.src = src;
     op.xAttrs = xAttrs;
+    logRpcIds(op, toLogRpcIds);
     logEdit(op);
   }
 
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Tue Jul 29 00:49:14 2014
@@ -821,6 +821,10 @@ public class FSEditLogLoader {
       RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
       fsDir.unprotectedRemoveXAttrs(removeXAttrOp.src,
           removeXAttrOp.xAttrs);
+      if (toAddRetryCache) {
+        fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId,
+            removeXAttrOp.rpcCallId);
+      }
       break;
     }
     default:
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Tue Jul 29 00:49:14 2014
@@ -3551,6 +3551,7 @@ public abstract class FSEditLogOp {
       XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
       src = p.getSrc();
       xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
+      readRpcIds(in, logVersion);
     }
 
     @Override
@@ -3561,18 +3562,22 @@ public abstract class FSEditLogOp {
       }
       b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
       b.build().writeDelimitedTo(out);
+      // clientId and callId
+      writeRpcIds(rpcClientId, rpcCallId, out);
     }
 
     @Override
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "SRC", src);
       appendXAttrsToXml(contentHandler, xAttrs);
+      appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
    }
 
    @Override
    void fromXml(Stanza st) throws InvalidXmlException {
      src = st.getValue("SRC");
      xAttrs = readXAttrsFromXml(st);
+     readRpcIdsFromXml(st);
    }
  }
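The RemoveXAttrOp changes keep the serializations in step: readFields and writeFields for the binary edit log, plus toXml and fromXml for the offline viewer, each now carrying the RPC client and call ids so a replayed log can repopulate the retry cache. A toy op showing the binary half of that contract (field layout and names are illustrative, not the Hadoop wire format):

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class RemoveOpExample {
      String src;
      long rpcClientId;
      int rpcCallId;

      // whatever extra fields writeFields appends ...
      void writeFields(DataOutputStream out) throws IOException {
        out.writeUTF(src);
        out.writeLong(rpcClientId);  // clientId and callId
        out.writeInt(rpcCallId);
      }

      // ... readFields must consume in the same order, or every later
      // op in the log stream is mis-parsed
      void readFields(DataInputStream in) throws IOException {
        src = in.readUTF();
        rpcClientId = in.readLong();
        rpcCallId = in.readInt();
      }
    }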
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Tue Jul 29 00:49:14 2014
@@ -614,6 +614,16 @@ public class FSImageFormat {
     INodeDirectory parentINode = fsDir.rootDir;
     for (long i = 0; i < numFiles; i++) {
       pathComponents = FSImageSerialization.readPathComponents(in);
+      for (int j=0; j < pathComponents.length; j++) {
+        byte[] newComponent = renameReservedComponentOnUpgrade
+            (pathComponents[j], getLayoutVersion());
+        if (!Arrays.equals(newComponent, pathComponents[j])) {
+          String oldPath = DFSUtil.byteArray2PathString(pathComponents);
+          pathComponents[j] = newComponent;
+          String newPath = DFSUtil.byteArray2PathString(pathComponents);
+          LOG.info("Renaming reserved path " + oldPath + " to " + newPath);
+        }
+      }
       final INode newNode = loadINode(
           pathComponents[pathComponents.length-1], false, in, counter);
@@ -926,6 +936,7 @@ public class FSImageFormat {
         oldnode = namesystem.dir.getInode(cons.getId()).asFile();
         inSnapshot = true;
       } else {
+        path = renameReservedPathsOnUpgrade(path, getLayoutVersion());
         final INodesInPath iip = fsDir.getLastINodeInPath(path);
         oldnode = INodeFile.valueOf(iip.getINode(0), path);
       }
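During an upgrade, FSImageFormat now rewrites any path component that collides with a reserved name and logs the rename. A compact sketch of the per-component rewrite loop; the ".reserved" literal and the rename suffix here are invented stand-ins for the real renameReservedComponentOnUpgrade logic:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class ReservedRenameSketch {
      static byte[] renameIfReserved(byte[] component) {
        byte[] reserved = ".reserved".getBytes(StandardCharsets.UTF_8);
        if (Arrays.equals(component, reserved)) {
          // assumed suffix for illustration only
          return ".user-renamed-.reserved".getBytes(StandardCharsets.UTF_8);
        }
        return component;
      }

      public static void main(String[] args) {
        byte[][] path = {
            "foo".getBytes(StandardCharsets.UTF_8),
            ".reserved".getBytes(StandardCharsets.UTF_8)};
        for (int j = 0; j < path.length; j++) {
          byte[] renamed = renameIfReserved(path[j]);
          if (!Arrays.equals(renamed, path[j])) {
            path[j] = renamed;  // cf. the LOG.info("Renaming reserved path ...")
            System.out.println("renamed component " + j);
          }
        }
      }
    }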
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Jul 29 00:49:14 2014
@@ -62,6 +62,8 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY;
@@ -83,9 +85,6 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT;
-
 import static org.apache.hadoop.util.Time.now;
 
 import java.io.BufferedWriter;
@@ -230,6 +229,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -3723,8 +3723,10 @@ public class FSNamesystem implements Nam
       StandbyException, IOException {
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         checkTraverse(pc, src);
@@ -4917,6 +4919,28 @@ public class FSNamesystem implements Nam
     }
   }
 
+  DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
+      ) throws AccessControlException, StandbyException {
+    checkSuperuserPrivilege();
+    checkOperation(OperationCategory.UNCHECKED);
+    readLock();
+    try {
+      checkOperation(OperationCategory.UNCHECKED);
+      final DatanodeManager dm = getBlockManager().getDatanodeManager();
+      final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);
+
+      DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
+      for (int i = 0; i < reports.length; i++) {
+        final DatanodeDescriptor d = datanodes.get(i);
+        reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
+            d.getStorageReports());
+      }
+      return reports;
+    } finally {
+      readUnlock();
+    }
+  }
+
   /**
    * Save namespace image.
    * This will save current namespace into fsimage file and empty edits file.
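getDatanodeStorageReport() above follows the usual namesystem discipline: check the operation category, take the read lock, re-check, copy what is needed into a plain array, and release the lock in a finally block so the snapshot is consistent. The skeleton of that locking shape, with simplified types standing in for the datanode descriptors:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class SnapshotUnderReadLock {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final List<String> datanodes = new ArrayList<String>();

      public String[] storageReport() {
        lock.readLock().lock();
        try {
          // copy while holding the lock so the returned array is a
          // consistent snapshot even if writers run right afterwards
          return datanodes.toArray(new String[0]);
        } finally {
          lock.readLock().unlock();  // always released, even on exception
        }
      }
    }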
@@ -8186,9 +8210,11 @@ public class FSNamesystem implements Nam
     nnConf.checkAclsConfigFlag();
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       if (isPermissionEnabled) {
         checkPermission(pc, src, false, null, null, null, null);
       }
@@ -8282,16 +8308,19 @@ public class FSNamesystem implements Nam
     nnConf.checkXAttrsConfigFlag();
     FSPermissionChecker pc = getPermissionChecker();
     boolean getAll = xAttrs == null || xAttrs.isEmpty();
-    List<XAttr> filteredXAttrs = null;
     if (!getAll) {
-      filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs);
-      if (filteredXAttrs.isEmpty()) {
-        return filteredXAttrs;
+      try {
+        XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs);
+      } catch (AccessControlException e) {
+        logAuditEvent(false, "getXAttrs", src);
+        throw e;
       }
     }
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         checkPathAccess(pc, src, FsAction.READ);
@@ -8305,15 +8334,21 @@ public class FSNamesystem implements Nam
         if (filteredAll == null || filteredAll.isEmpty()) {
           return null;
         }
-        List<XAttr> toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size());
-        for (XAttr xAttr : filteredXAttrs) {
+        List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
+        for (XAttr xAttr : xAttrs) {
+          boolean foundIt = false;
           for (XAttr a : filteredAll) {
             if (xAttr.getNameSpace() == a.getNameSpace()
                 && xAttr.getName().equals(a.getName())) {
               toGet.add(a);
+              foundIt = true;
               break;
             }
           }
+          if (!foundIt) {
+            throw new IOException(
+                "At least one of the attributes provided was not found.");
+          }
         }
         return toGet;
       }
@@ -8329,8 +8364,10 @@ public class FSNamesystem implements Nam
     nnConf.checkXAttrsConfigFlag();
     final FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
      checkOperation(OperationCategory.READ);
      if (isPermissionEnabled) {
        /* To access xattr names, you need EXECUTE in the owning directory. */
@@ -8347,17 +8384,42 @@ public class FSNamesystem implements Nam
       readUnlock();
     }
   }
-  
+
+  /**
+   * Remove an xattr for a file or directory.
+   *
+   * @param src
+   *          - path to remove the xattr from
+   * @param xAttr
+   *          - xAttr to remove
+   * @throws AccessControlException
+   * @throws SafeModeException
+   * @throws UnresolvedLinkException
+   * @throws IOException
+   */
   void removeXAttr(String src, XAttr xAttr) throws IOException {
-    nnConf.checkXAttrsConfigFlag();
-    HdfsFileStatus resultingStat = null;
-    FSPermissionChecker pc = getPermissionChecker();
+    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return; // Return previous response
+    }
+    boolean success = false;
     try {
-      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+      removeXAttrInt(src, xAttr, cacheEntry != null);
+      success = true;
     } catch (AccessControlException e) {
       logAuditEvent(false, "removeXAttr", src);
       throw e;
+    } finally {
+      RetryCache.setState(cacheEntry, success);
     }
+  }
+
+  void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache)
+      throws IOException {
+    nnConf.checkXAttrsConfigFlag();
+    HdfsFileStatus resultingStat = null;
+    FSPermissionChecker pc = getPermissionChecker();
+    XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
@@ -8371,12 +8433,12 @@ public class FSNamesystem implements Nam
       xAttrs.add(xAttr);
       List<XAttr> removedXAttrs = dir.removeXAttrs(src, xAttrs);
       if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
-        getEditLog().logRemoveXAttrs(src, removedXAttrs);
+        getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
+      } else {
+        throw new IOException(
+            "No matching attributes found for remove operation");
       }
       resultingStat = getAuditFileInfo(src, false);
-    } catch (AccessControlException e) {
-      logAuditEvent(false, "removeXAttr", src);
-      throw e;
     } finally {
       writeUnlock();
     }
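removeXAttr() is now split into a thin retry-cache wrapper and removeXAttrInt(), which does the real work; a retried RPC whose first attempt already succeeded returns immediately instead of failing with "No matching attributes found". A stripped-down sketch of the wrapper protocol, where a ConcurrentHashMap stands in for Hadoop's RetryCache:

    import java.util.concurrent.ConcurrentHashMap;

    public class RetryCacheSketch {
      // call id -> whether that call already succeeded
      private final ConcurrentHashMap<Long, Boolean> cache =
          new ConcurrentHashMap<Long, Boolean>();

      public void removeXAttr(long callId, Runnable work) {
        Boolean prior = cache.get(callId);
        if (prior != null && prior) {
          return;  // retried call: return the previous success
        }
        boolean success = false;
        try {
          work.run();  // cf. removeXAttrInt(...)
          success = true;
        } finally {
          cache.put(callId, success);  // cf. RetryCache.setState(...)
        }
      }
    }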
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java Tue Jul 29 00:49:14 2014
@@ -71,6 +71,8 @@ public class FileJournalManager implemen
       NameNodeFile.EDITS.getName() + "_(\\d+)-(\\d+)");
   private static final Pattern EDITS_INPROGRESS_REGEX = Pattern.compile(
       NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+)");
+  private static final Pattern EDITS_INPROGRESS_STALE_REGEX = Pattern.compile(
+      NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+).*(\\S+)");
 
   private File currentInProgress = null;
 
@@ -162,8 +164,7 @@ public class FileJournalManager implemen
       throws IOException {
     LOG.info("Purging logs older than " + minTxIdToKeep);
     File[] files = FileUtil.listFiles(sd.getCurrentDir());
-    List<EditLogFile> editLogs =
-      FileJournalManager.matchEditLogs(files);
+    List<EditLogFile> editLogs = matchEditLogs(files, true);
     for (EditLogFile log : editLogs) {
       if (log.getFirstTxId() < minTxIdToKeep &&
           log.getLastTxId() < minTxIdToKeep) {
@@ -244,8 +245,13 @@ public class FileJournalManager implemen
   public static List<EditLogFile> matchEditLogs(File logDir) throws IOException {
     return matchEditLogs(FileUtil.listFiles(logDir));
   }
-  
+
   static List<EditLogFile> matchEditLogs(File[] filesInStorage) {
+    return matchEditLogs(filesInStorage, false);
+  }
+
+  private static List<EditLogFile> matchEditLogs(File[] filesInStorage,
+      boolean forPurging) {
     List<EditLogFile> ret = Lists.newArrayList();
     for (File f : filesInStorage) {
       String name = f.getName();
@@ -256,6 +262,7 @@ public class FileJournalManager implemen
           long startTxId = Long.parseLong(editsMatch.group(1));
           long endTxId = Long.parseLong(editsMatch.group(2));
           ret.add(new EditLogFile(f, startTxId, endTxId));
+          continue;
         } catch (NumberFormatException nfe) {
           LOG.error("Edits file " + f + " has improperly formatted " +
                     "transaction ID");
@@ -270,12 +277,30 @@ public class FileJournalManager implemen
           long startTxId = Long.parseLong(inProgressEditsMatch.group(1));
           ret.add(
               new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID, true));
+          continue;
         } catch (NumberFormatException nfe) {
           LOG.error("In-progress edits file " + f + " has improperly " +
                     "formatted transaction ID");
           // skip
         }
       }
+      if (forPurging) {
+        // Check for in-progress stale edits
+        Matcher staleInprogressEditsMatch = EDITS_INPROGRESS_STALE_REGEX
+            .matcher(name);
+        if (staleInprogressEditsMatch.matches()) {
+          try {
+            long startTxId = Long.valueOf(staleInprogressEditsMatch.group(1));
+            ret.add(new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID,
+                true));
+            continue;
+          } catch (NumberFormatException nfe) {
+            LOG.error("In-progress stale edits file " + f + " has improperly "
+                + "formatted transaction ID");
+            // skip
+          }
+        }
+      }
     }
     return ret;
   }
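The purge path now recognises stale in-progress segments via a second, looser pattern that is consulted only when forPurging is true, so leftovers from an interrupted NameNode can finally be cleaned up. A runnable illustration of why the extra regex is needed, assuming the in-progress prefix is "edits_inprogress" as in HDFS (the ".stale" file name is an assumed example):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class StaleEditsMatch {
      static final Pattern INPROGRESS =
          Pattern.compile("edits_inprogress_(\\d+)");
      static final Pattern INPROGRESS_STALE =
          Pattern.compile("edits_inprogress_(\\d+).*(\\S+)");

      public static void main(String[] args) {
        String name = "edits_inprogress_0000042.stale";
        // the strict pattern rejects the suffix, so normal log selection
        // never touches the stale file
        System.out.println(INPROGRESS.matcher(name).matches());   // false
        Matcher m = INPROGRESS_STALE.matcher(name);
        if (m.matches()) {
          long startTxId = Long.parseLong(m.group(1));            // 42
          System.out.println("purge candidate from txid " + startTxId);
        }
      }
    }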
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Tue Jul 29 00:49:14 2014
@@ -115,6 +115,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -830,12 +831,24 @@ class NameNodeRpcServer implements Namen
       throws IOException {
     DatanodeInfo results[] = namesystem.datanodeReport(type);
     if (results == null ) {
-      throw new IOException("Cannot find datanode report");
+      throw new IOException("Failed to get datanode report for " + type
+          + " datanodes.");
     }
     return results;
   }
 
   @Override // ClientProtocol
+  public DatanodeStorageReport[] getDatanodeStorageReport(
+      DatanodeReportType type) throws IOException {
+    final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type);
+    if (reports == null ) {
+      throw new IOException("Failed to get datanode storage report for " + type
+          + " datanodes.");
+    }
+    return reports;
+  }
+
+  @Override // ClientProtocol
   public boolean setSafeMode(SafeModeAction action, boolean isChecked)
       throws IOException {
     OperationCategory opCategory = OperationCategory.UNCHECKED;
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java Tue Jul 29 00:49:14 2014
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.XAttrHelpe
 import org.apache.hadoop.security.AccessControlException;
 
 import com.google.common.collect.Lists;
+import com.google.common.base.Preconditions;
 
 /**
  * There are four types of extended attributes <XAttr> defined by the
@@ -60,8 +61,20 @@ public class XAttrPermissionFilter {
     throw new AccessControlException("User doesn't have permission for xattr: "
         + XAttrHelper.getPrefixName(xAttr));
   }
-  
-  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
+
+  static void checkPermissionForApi(FSPermissionChecker pc,
+      List<XAttr> xAttrs) throws AccessControlException {
+    Preconditions.checkArgument(xAttrs != null);
+    if (xAttrs.isEmpty()) {
+      return;
+    }
+
+    for (XAttr xAttr : xAttrs) {
+      checkPermissionForApi(pc, xAttr);
+    }
+  }
+
+  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
       List<XAttr> xAttrs) {
     assert xAttrs != null : "xAttrs can not be null";
     if (xAttrs == null || xAttrs.isEmpty()) {
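The list overload of checkPermissionForApi changes the failure mode for getXAttrs: instead of silently filtering out attributes the caller may not read, the first disallowed attribute raises AccessControlException, which the FSNamesystem hunks above turn into an audit event. A minimal model of fail-fast versus filter; the namespace rule and exception type are invented for the example:

    import java.util.Arrays;
    import java.util.List;

    public class FailFastCheck {
      static class AccessDenied extends RuntimeException {
        AccessDenied(String m) { super(m); }
      }

      // toy rule: only the "user." namespace is visible to the caller
      static boolean isAllowed(String attr) {
        return attr.startsWith("user.");
      }

      static void checkAll(List<String> attrs) {
        if (attrs == null) {
          throw new IllegalArgumentException("attrs is null");  // cf. Preconditions
        }
        for (String a : attrs) {
          if (!isAllowed(a)) {
            throw new AccessDenied("no permission for xattr: " + a);
          }
        }
      }

      public static void main(String[] args) {
        checkAll(Arrays.asList("user.tag"));  // passes
        try {
          checkAll(Arrays.asList("user.tag", "trusted.x"));
        } catch (AccessDenied e) {
          System.out.println(e.getMessage());  // fails fast, nothing filtered
        }
      }
    }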
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1614234&r1=1614233&r2=1614234&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Tue Jul 29 00:49:14 2014
@@ -111,6 +111,7 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
 import org.apache.hadoop.net.Node;
@@ -188,7 +189,7 @@ public class NamenodeWebHdfsMethods {
       throws IOException {
     final NamenodeProtocols np = namenode.getRpcServer();
     if (np == null) {
-      throw new IOException("Namenode is in startup mode");
+      throw new RetriableException("Namenode is in startup mode");
     }
     return np;
   }
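Swapping IOException for RetriableException matters because retry-aware Hadoop clients treat a retriable failure as "try again later", so a WebHDFS call that races NameNode startup is retried rather than surfaced to the user. A self-contained sketch of that client-side distinction; the local RetriableException class is a stand-in for org.apache.hadoop.ipc.RetriableException:

    import java.io.IOException;

    public class StartupRetrySketch {
      static class RetriableException extends IOException {
        RetriableException(String m) { super(m); }
      }

      static String rpc(boolean started) throws IOException {
        if (!started) {
          // cf. "Namenode is in startup mode" above
          throw new RetriableException("server still starting");
        }
        return "ok";
      }

      public static void main(String[] args) throws Exception {
        boolean started = false;
        for (int attempt = 0; attempt < 3; attempt++) {
          try {
            System.out.println(rpc(started));
            break;  // success, stop retrying
          } catch (RetriableException e) {
            started = true;  // pretend startup finished; retry the call
          }
          // a plain IOException would have propagated out of main instead
        }
      }
    }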