Author: harsh
Date: Wed May 30 18:33:41 2012
New Revision: 1344394
URL: http://svn.apache.org/viewvc?rev=1344394&view=rev
Log:
HDFS-3476. Correct the default used in TestDFSClientRetries.busyTest() after
HDFS-3462. (harsh)
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1344394&r1=1344393&r2=1344394&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed May 30 18:33:41 2012
@@ -82,6 +82,9 @@ Trunk (unreleased changes)
HDFS-2391. Newly set BalancerBandwidth value is not displayed anywhere.
(harsh)
+ HDFS-3476. Correct the default used in TestDFSClientRetries.busyTest()
+ after HDFS-3462 (harsh)
+
OPTIMIZATIONS
HDFS-2834. Add a ByteBuffer-based read API to DFSInputStream.
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1344394&r1=1344393&r2=1344394&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Wed May 30 18:33:41 2012
@@ -519,17 +519,20 @@ public class TestDFSClientRetries extend
LOG.info("Test 4 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
}
- private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, int retries)
+ private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, int retries)
throws IOException {
boolean ret = true;
short replicationFactor = 1;
long blockSize = 128*1024*1024; // DFS block size
int bufferSize = 4096;
- int originalXcievers = conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,0);
- conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, xcievers);
- conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
- retries);
+ int originalXcievers = conf.getInt(
+ DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
+ DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
+ conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
+ xcievers);
+ conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
+ retries);
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, timeWin);
// Disable keepalive
conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 0);
@@ -605,7 +608,8 @@ public class TestDFSClientRetries extend
e.printStackTrace();
ret = false;
} finally {
- conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,originalXcievers);
+ conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
+ originalXcievers);
fs.delete(file1, false);
cluster.shutdown();
}