Author: suresh
Date: Wed Apr 17 18:11:31 2013
New Revision: 1469015

URL: http://svn.apache.org/r1469015
Log:
HDFS-4695. TestEditLog leaks open file handles between tests. Contributed by Ivan Mitic.
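The fix is a standard resource-cleanup pattern: hold each stream in a local
variable and close it in a finally block (or via IOUtils.cleanup) so the file
handle is released even when a test exits early or fails. A minimal sketch of
the write-side pattern, written as it would sit inside TestEditLog (same
package as EditLogFileOutputStream and NNStorage); the transaction id 3 and
buffer size 1024 match the patch below, everything else is illustrative:

    // Create an in-progress edits file, then guarantee close() runs so the
    // handle cannot leak into the next test.
    File log = new File(currentDir, NNStorage.getInProgressEditsFileName(3));
    EditLogFileOutputStream stream = new EditLogFileOutputStream(log, 1024);
    try {
      stream.create();               // opens the underlying file handle
      // ... exercise the edit log / set up on-disk state here ...
    } finally {
      stream.close();                // runs on early break, return or exception
    }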
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1469015&r1=1469014&r2=1469015&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Apr 17 18:11:31 2013
@@ -568,6 +568,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4639. startFileInternal() should not increment generation stamp.
     (Plamen Jeliazkov via shv)
 
+    HDFS-4695. TestEditLog leaks open file handles between tests.
+    (Ivan Mitic via suresh)
+
 Release 2.0.4-alpha - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1469015&r1=1469014&r2=1469015&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed Apr 17 18:11:31 2013
@@ -67,6 +67,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -634,6 +635,7 @@ public class TestEditLog {
 
     // Now restore the backup
     FileUtil.fullyDeleteContents(dfsDir);
+    dfsDir.delete();
     backupDir.renameTo(dfsDir);
 
     // Directory layout looks like:
@@ -760,19 +762,24 @@ public class TestEditLog {
 
       File log = new File(currentDir,
           NNStorage.getInProgressEditsFileName(3));
-      new EditLogFileOutputStream(log, 1024).create();
-      if (!inBothDirs) {
-        break;
-      }
-
-      NNStorage storage = new NNStorage(conf,
-          Collections.<URI>emptyList(),
-          Lists.newArrayList(uri));
-
-      if (updateTransactionIdFile) {
-        storage.writeTransactionIdFileToStorage(3);
+      EditLogFileOutputStream stream = new EditLogFileOutputStream(log, 1024);
+      try {
+        stream.create();
+        if (!inBothDirs) {
+          break;
+        }
+
+        NNStorage storage = new NNStorage(conf,
+            Collections.<URI>emptyList(),
+            Lists.newArrayList(uri));
+
+        if (updateTransactionIdFile) {
+          storage.writeTransactionIdFileToStorage(3);
+        }
+        storage.close();
+      } finally {
+        stream.close();
       }
-      storage.close();
     }
 
     try {
@@ -1335,12 +1342,15 @@ public class TestEditLog {
     FSEditLog editlog = getFSEditLog(storage);
     editlog.initJournalsForWrite();
     long startTxId = 1;
+    Collection<EditLogInputStream> streams = null;
     try {
-      readAllEdits(editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL),
-          startTxId);
+      streams = editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL);
+      readAllEdits(streams, startTxId);
     } catch (IOException e) {
       LOG.error("edit log failover didn't work", e);
       fail("Edit log failover didn't work");
+    } finally {
+      IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
     }
   }
 
@@ -1382,12 +1392,15 @@ public class TestEditLog {
     FSEditLog editlog = getFSEditLog(storage);
     editlog.initJournalsForWrite();
     long startTxId = 1;
+    Collection<EditLogInputStream> streams = null;
     try {
-      readAllEdits(editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL),
-          startTxId);
+      streams = editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL);
+      readAllEdits(streams, startTxId);
     } catch (IOException e) {
       LOG.error("edit log failover didn't work", e);
       fail("Edit log failover didn't work");
+    } finally {
+      IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
     }
   }