Author: eli Date: Sat Jun 9 00:36:55 2012 New Revision: 1348284 URL: http://svn.apache.org/viewvc?rev=1348284&view=rev Log: HDFS-3515. Port HDFS-1457 to branch-1. Contributed by Eli Collins
Modified: hadoop/common/branches/branch-1/CHANGES.txt hadoop/common/branches/branch-1/src/hdfs/hdfs-default.xml hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Modified: hadoop/common/branches/branch-1/CHANGES.txt URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1348284&r1=1348283&r2=1348284&view=diff ============================================================================== --- hadoop/common/branches/branch-1/CHANGES.txt (original) +++ hadoop/common/branches/branch-1/CHANGES.txt Sat Jun 9 00:36:55 2012 @@ -8,6 +8,8 @@ Release 1.2.0 - unreleased IMPROVEMENTS + HDFS-3515. Port HDFS-1457 to branch-1. (eli) + BUG FIXES HADOOP-8445. 
Token should not print the password in toString Modified: hadoop/common/branches/branch-1/src/hdfs/hdfs-default.xml URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/hdfs-default.xml?rev=1348284&r1=1348283&r2=1348284&view=diff ============================================================================== --- hadoop/common/branches/branch-1/src/hdfs/hdfs-default.xml (original) +++ hadoop/common/branches/branch-1/src/hdfs/hdfs-default.xml Sat Jun 9 00:36:55 2012 @@ -464,4 +464,14 @@ creations/deletions), or "all".</descrip </description> </property> +<property> + <name>dfs.image.transfer.bandwidthPerSec</name> + <value>0</value> + <description> + Specifies the maximum amount of bandwidth that can be utilized + for image transfer in terms of the number of bytes per second. + A default value of 0 indicates that throttling is disabled. + </description> +</property> + </configuration> Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1348284&r1=1348283&r2=1348284&view=diff ============================================================================== --- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java (original) +++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java Sat Jun 9 00:36:55 2012 @@ -205,6 +205,10 @@ public class DFSConfigKeys extends Commo public static final String DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY = "dfs.client.read.shortcircuit.skip.checksum"; public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT = false; + public static final String DFS_IMAGE_TRANSFER_RATE_KEY = + "dfs.image.transfer.bandwidthPerSec"; + public static final long DFS_IMAGE_TRANSFER_RATE_DEFAULT = 0; //no throttling + //Keys with no defaults public static final String DFS_DATANODE_PLUGINS_KEY = 
"dfs.datanode.plugins"; public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout"; Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1348284&r1=1348283&r2=1348284&view=diff ============================================================================== --- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original) +++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Sat Jun 9 00:36:55 2012 @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.D import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; @@ -68,7 +69,7 @@ class BlockReceiver implements java.io.C private String mirrorAddr; private DataOutputStream mirrorOut; private Daemon responder = null; - private BlockTransferThrottler throttler; + private DataTransferThrottler throttler; private FSDataset.BlockWriteStreams streams; private boolean isRecovery = false; private String clientName; @@ -506,7 +507,7 @@ class BlockReceiver implements java.io.C DataOutputStream mirrOut, // output to next datanode DataInputStream mirrIn, // input from next datanode DataOutputStream replyOut, // output to previous datanode - String mirrAddr, BlockTransferThrottler throttlerArg, + String mirrAddr, DataTransferThrottler throttlerArg, int numTargets) throws IOException { mirrorOut = mirrOut; Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java 
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1348284&r1=1348283&r2=1348284&view=diff ============================================================================== --- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original) +++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Sat Jun 9 00:36:55 2012 @@ -33,6 +33,7 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.SocketOutputStream; import org.apache.hadoop.util.ChecksumUtil; @@ -63,7 +64,7 @@ class BlockSender implements java.io.Clo private boolean transferToAllowed = true; private boolean blockReadFully; //set when the whole block is read private boolean verifyChecksum; //if true, check is verified while reading - private BlockTransferThrottler throttler; + private DataTransferThrottler throttler; private final String clientTraceFmt; // format of client trace log message private final MemoizedBlock memoizedBlock; @@ -381,7 +382,7 @@ class BlockSender implements java.io.Clo * @return total bytes reads, including crc. 
*/ long sendBlock(DataOutputStream out, OutputStream baseStream, - BlockTransferThrottler throttler) throws IOException { + DataTransferThrottler throttler) throws IOException { if( out == null ) { throw new IOException( "out stream is null" ); } Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?rev=1348284&r1=1348283&r2=1348284&view=diff ============================================================================== --- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (original) +++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Sat Jun 9 00:36:55 2012 @@ -49,6 +49,7 @@ import org.apache.hadoop.conf.Configurat import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.StringUtils; @@ -94,7 +95,7 @@ class DataBlockScanner implements Runnab Random random = new Random(); - BlockTransferThrottler throttler = null; + DataTransferThrottler throttler = null; private static enum ScanType { REMOTE_READ, // Verified when a block read by a client etc @@ -238,7 +239,7 @@ class DataBlockScanner implements Runnab } synchronized (this) { - throttler = new BlockTransferThrottler(200, MAX_SCAN_RATE); + throttler = new DataTransferThrottler(200, MAX_SCAN_RATE); } } Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java?rev=1348284&r1=1348283&r2=1348284&view=diff 
============================================================================== --- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java (original) +++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java Sat Jun 9 00:36:55 2012 @@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configurat import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.hdfs.server.balancer.Balancer; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.util.StringUtils; /** @@ -63,7 +64,7 @@ class DataXceiverServer implements Runna * It limits the number of block moves for balancing and * the total amount of bandwidth they can use. */ - static class BlockBalanceThrottler extends BlockTransferThrottler { + static class BlockBalanceThrottler extends DataTransferThrottler { private int numThreads; /**Constructor Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java?rev=1348284&r1=1348283&r2=1348284&view=diff ============================================================================== --- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (original) +++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java Sat Jun 9 00:36:55 2012 @@ -38,6 +38,7 @@ import org.apache.commons.logging.LogFac import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.io.MD5Hash; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; @@ -82,11 +83,11 @@ public class GetImageServlet extends Htt 
if (ff.getImage()) { // send fsImage TransferFsImage.getFileServer(response.getOutputStream(), - nnImage.getFsImageName()); + nnImage.getFsImageName(), getThrottler(conf)); } else if (ff.getEdit()) { // send edits TransferFsImage.getFileServer(response.getOutputStream(), - nnImage.getFsEditName()); + nnImage.getFsEditName(), getThrottler(conf)); } else if (ff.putImage()) { synchronized (fsImageTransferLock) { final MD5Hash expectedChecksum = ff.getNewChecksum(); @@ -134,6 +135,22 @@ public class GetImageServlet extends Htt response.getOutputStream().close(); } } + + /** + * Construct a throttler from conf + * @param conf configuration + * @return a data transfer throttler + */ + private final DataTransferThrottler getThrottler(Configuration conf) { + long transferBandwidth = + conf.getLong(DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY, + DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_DEFAULT); + DataTransferThrottler throttler = null; + if (transferBandwidth > 0) { + throttler = new DataTransferThrottler(transferBandwidth); + } + return throttler; + } private boolean isValidRequestor(String remoteUser, Configuration conf) throws IOException { Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?rev=1348284&r1=1348283&r2=1348284&view=diff ============================================================================== --- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (original) +++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Sat Jun 9 00:36:55 2012 @@ -30,6 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.security.SecurityUtil; +import 
org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.ErrorSimulator; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.security.UserGroupInformation; @@ -125,7 +126,8 @@ class TransferFsImage implements FSConst * A server-side method to respond to a getfile http request * Copies the contents of the local file into the output stream. */ - static void getFileServer(OutputStream outstream, File localfile) + static void getFileServer(OutputStream outstream, File localfile, + DataTransferThrottler throttler) throws IOException { byte buf[] = new byte[BUFFER_SIZE]; FileInputStream infile = null; @@ -144,6 +146,9 @@ class TransferFsImage implements FSConst break; } outstream.write(buf, 0, num); + if (throttler != null) { + throttler.throttle(num); + } } } finally { if (infile != null) { Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1348284&r1=1348283&r2=1348284&view=diff ============================================================================== --- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original) +++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Sat Jun 9 00:36:55 2012 @@ -45,9 +45,10 @@ import org.apache.hadoop.hdfs.protocol.F import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.server.common.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Util; -import org.apache.hadoop.hdfs.server.datanode.BlockTransferThrottler; +import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; + /** * This class tests if block replacement 
request to data nodes work correctly. */ @@ -63,7 +64,7 @@ public class TestBlockReplacement extend final long TOTAL_BYTES =6*bandwidthPerSec; long bytesToSend = TOTAL_BYTES; long start = Util.now(); - BlockTransferThrottler throttler = new BlockTransferThrottler(bandwidthPerSec); + DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec); long totalBytes = 0L; long bytesSent = 1024*512L; // 0.5MB throttler.throttle(bytesSent);