Author: acmurthy
Date: Tue Sep 25 18:21:41 2012
New Revision: 1390038

URL: http://svn.apache.org/viewvc?rev=1390038&view=rev
Log:
Merge -c 1379746 from branch-1 to branch-1.1 to fix HDFS-3871. Change DFSClient to use RetryUtils.
Modified:
    hadoop/common/branches/branch-1.1/CHANGES.txt
    hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1390038&r1=1390037&r2=1390038&view=diff
==============================================================================
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue Sep 25 18:21:41 2012
@@ -192,6 +192,9 @@ Release 1.1.0 - 2012.09.16
     HADOOP-8748. Refactor DFSClient retry utility methods to a new class
     in org.apache.hadoop.io.retry. Contributed by Arun C Murthy.
 
+    HDFS-3871. Change DFSClient to use RetryUtils.  (Arun C Murthy
+    via szetszwo)
+
   BUG FIXES
 
     HDFS-3696. Set chunked streaming mode in WebHdfsFileSystem write operations

Modified: hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=1390038&r1=1390037&r2=1390038&view=diff
==============================================================================
--- hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Tue Sep 25 18:21:41 2012
@@ -21,6 +21,7 @@ import org.apache.hadoop.io.*;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ipc.*;
@@ -124,98 +125,28 @@ public class DFSClient implements FSCons
     return (ClientProtocol)RPC.getProxy(ClientProtocol.class,
         ClientProtocol.versionID, nameNodeAddr, ugi, conf,
         NetUtils.getSocketFactory(conf, ClientProtocol.class), 0,
-        getMultipleLinearRandomRetry(conf));
-  }
-
-  /**
-   * Return the default retry policy used in RPC.
-   *
-   * If dfs.client.retry.policy.enabled == false, use TRY_ONCE_THEN_FAIL.
-   *
-   * Otherwise,
-   * (1) use multipleLinearRandomRetry for
-   *     - SafeModeException, or
-   *     - IOException other than RemoteException; and
-   * (2) use TRY_ONCE_THEN_FAIL for
-   *     - non-SafeMode RemoteException, or
-   *     - non-IOException.
-   *
-   * Note that dfs.client.retry.max < 0 is not allowed.
-   */
-  public static RetryPolicy getDefaultRetryPolicy(Configuration conf) {
-    final RetryPolicy multipleLinearRandomRetry = getMultipleLinearRandomRetry(conf);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("multipleLinearRandomRetry = " + multipleLinearRandomRetry);
-    }
-
-    if (multipleLinearRandomRetry == null) {
-      //no retry
-      return RetryPolicies.TRY_ONCE_THEN_FAIL;
-    } else {
-      //use exponential backoff
-      return new RetryPolicy() {
-        @Override
-        public boolean shouldRetry(Exception e, int retries) throws Exception {
-          //see (1) and (2) in the javadoc of this method.
-          final RetryPolicy p;
-          if (e instanceof RemoteException) {
-            final RemoteException re = (RemoteException)e;
-            p = SafeModeException.class.getName().equals(re.getClassName())?
-                multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL;
-          } else if (e instanceof IOException) {
-            p = multipleLinearRandomRetry;
-          } else { //non-IOException
-            p = RetryPolicies.TRY_ONCE_THEN_FAIL;
-          }
-
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("RETRY " + retries + ") policy="
-                + p.getClass().getSimpleName() + ", exception=" + e);
-          }
-          return p.shouldRetry(e, retries);
-        }
-
-        @Override
-        public String toString() {
-          return "RetryPolicy[" + multipleLinearRandomRetry + ", "
-              + RetryPolicies.TRY_ONCE_THEN_FAIL.getClass().getSimpleName()
-              + "]";
-        }
-      };
-    }
-  }
-
-  /**
-   * Return the MultipleLinearRandomRetry policy specified in the conf,
-   * or null if the feature is disabled.
-   * If the policy is specified in the conf but the policy cannot be parsed,
-   * the default policy is returned.
-   *
-   * Conf property: N pairs of sleep-time and number-of-retries
-   *   dfs.client.retry.policy = "s1,n1,s2,n2,..."
-   */
-  private static RetryPolicy getMultipleLinearRandomRetry(Configuration conf) {
-    final boolean enabled = conf.getBoolean(
-        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
-        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT);
-    if (!enabled) {
-      return null;
+        RetryUtils.getMultipleLinearRandomRetry(
+            conf,
+            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
+            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
+            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
+            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT
+            ));
   }
-
-    final String policy = conf.get(
-        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
-        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT);
-
-    final RetryPolicy r = RetryPolicies.MultipleLinearRandomRetry.parseCommaSeparatedString(policy);
-    return r != null? r: RetryPolicies.MultipleLinearRandomRetry.parseCommaSeparatedString(
-        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT);
-  }
-
   private static ClientProtocol createNamenode(ClientProtocol rpcNamenode,
       Configuration conf) throws IOException {
     //default policy
-    final RetryPolicy defaultPolicy = getDefaultRetryPolicy(conf);
-
+    final RetryPolicy defaultPolicy =
+        RetryUtils.getDefaultRetryPolicy(
+            conf,
+            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
+            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
+            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
+            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
+            SafeModeException.class
+            );
+
     //create policy
     RetryPolicy createPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
         5, LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
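The getMultipleLinearRandomRetry javadoc removed above documents the retry spec as N pairs of sleep-time and number-of-retries. A minimal standalone sketch of enabling the feature and parsing such a spec; the value "10000,6,60000,10" is illustrative rather than the branch default, and only classes and config keys already referenced in the diff are used:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public class RetrySpecSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Switch client retries on explicitly.
        conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
        // Illustrative spec: sleep ~10s between each of the first 6 retries,
        // then ~60s between each of the next 10.
        conf.set(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY, "10000,6,60000,10");

        // parseCommaSeparatedString returns null for an unparsable spec;
        // the removed DFSClient code fell back to the default spec in that case.
        RetryPolicy p = RetryPolicies.MultipleLinearRandomRetry
            .parseCommaSeparatedString(
                conf.get(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY));
        System.out.println("parsed policy = " + p);
      }
    }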
Modified: hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1390038&r1=1390037&r2=1390038&view=diff
==============================================================================
--- hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Tue Sep 25 18:21:41 2012
@@ -48,7 +48,6 @@ import org.apache.hadoop.fs.MD5MD5CRC32F
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.ByteRangeInputStream;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -83,6 +82,7 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
@@ -159,7 +159,15 @@ public class WebHdfsFileSystem extends F
     setConf(conf);
 
     this.nnAddr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
-    this.retryPolicy = DFSClient.getDefaultRetryPolicy(conf);
+    this.retryPolicy =
+        RetryUtils.getDefaultRetryPolicy(
+            conf,
+            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
+            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
+            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
+            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
+            SafeModeException.class
+            );
     this.workingDir = getHomeDirectory();
 
     if (UserGroupInformation.isSecurityEnabled()) {
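With both call sites converted, the retry policy construction that was private to DFSClient now comes from the shared RetryUtils helper, which also lets WebHdfsFileSystem drop its DFSClient import. A minimal sketch of the new call pattern, mirroring the six arguments in the hunks above; the SafeModeException import path is an assumption based on the branch-1 namenode package layout, and it names the one RemoteException type that the policy keeps retrying while other RemoteExceptions fail fast:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryUtils;

    public class RetryUtilsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // conf, enabled key/default, spec key/default, and the remote
        // exception class worth waiting out (safe mode is transient,
        // so SafeModeException is retried under the configured spec).
        RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(
            conf,
            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
            SafeModeException.class);
        System.out.println("default policy = " + defaultPolicy);
      }
    }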