See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/505/
###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 800544 lines...]
[junit] 2010-11-29 15:47:18,524 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(770)) - Shutting down DataNode 0
[junit] 2010-11-29 15:47:18,625 INFO ipc.Server (Server.java:stop(1611)) - Stopping server on 39949
[junit] 2010-11-29 15:47:18,626 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 39949: exiting
[junit] 2010-11-29 15:47:18,626 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
[junit] 2010-11-29 15:47:18,626 WARN datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:40855, storageID=DS-1880333671-127.0.1.1-40855-1291045627678, infoPort=46798, ipcPort=39949):DataXceiveServer: java.nio.channels.AsynchronousCloseException
[junit]     at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
[junit]     at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
[junit]     at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
[junit]     at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
[junit]     at java.lang.Thread.run(Thread.java:619)
[junit]
[junit] 2010-11-29 15:47:18,626 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 39949
[junit] 2010-11-29 15:47:18,627 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
[junit] 2010-11-29 15:47:18,714 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
[junit] 2010-11-29 15:47:18,727 INFO datanode.DataNode (DataNode.java:run(1442)) - DatanodeRegistration(127.0.0.1:40855, storageID=DS-1880333671-127.0.1.1-40855-1291045627678, infoPort=46798, ipcPort=39949):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
[junit] 2010-11-29 15:47:18,728 INFO ipc.Server (Server.java:stop(1611)) - Stopping server on 39949
[junit] 2010-11-29 15:47:18,728 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
[junit] 2010-11-29 15:47:18,728 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
[junit] 2010-11-29 15:47:18,728 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
[junit] 2010-11-29 15:47:18,729 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
[junit] 2010-11-29 15:47:18,831 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
[junit] 2010-11-29 15:47:18,831 WARN namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
[junit] 2010-11-29 15:47:18,831 INFO namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 2 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 5
[junit] 2010-11-29 15:47:18,832 INFO ipc.Server (Server.java:stop(1611)) - Stopping server on 46570
[junit] 2010-11-29 15:47:18,833 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 46570: exiting
[junit] 2010-11-29 15:47:18,833 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 46570: exiting
[junit] 2010-11-29 15:47:18,833 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 46570: exiting
[junit] 2010-11-29 15:47:18,833 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 46570: exiting
[junit] 2010-11-29 15:47:18,833 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 46570: exiting
[junit] 2010-11-29 15:47:18,834 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 46570: exiting
[junit] 2010-11-29 15:47:18,834 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 46570: exiting
[junit] 2010-11-29 15:47:18,833 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 46570: exiting
[junit] 2010-11-29 15:47:18,833 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 46570: exiting
[junit] 2010-11-29 15:47:18,833 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 46570: exiting
[junit] 2010-11-29 15:47:18,835 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 46570
[junit] 2010-11-29 15:47:18,834 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
[junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 71.934 sec
checkfailure:
[touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed
BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:722: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:488: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:691: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:637: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:705: Tests failed!
Total time: 251 minutes 38 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure
###################################################################################
############################## FAILED TESTS (if any) ##############################

18 tests failed.

REGRESSION: org.apache.hadoop.hdfs.TestFiHFlush.hFlushFi02_a

Error Message:
null

Stack Trace:
junit.framework.AssertionFailedError:
    at org.apache.hadoop.hdfs.TestFiHFlush.runDiskErrorTest(TestFiHFlush.java:56)
    at org.apache.hadoop.hdfs.TestFiHFlush.hFlushFi02_a(TestFiHFlush.java:114)

FAILED: org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock

Error Message:
test timed out after 60000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
    at java.io.FileInputStream.readBytes(Native Method)
    at java.io.FileInputStream.read(FileInputStream.java:199)
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:256)
    at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:218)
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:258)
    at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
    at sun.security.provider.SeedGenerator$URLSeedGenerator.getSeedByte(SeedGenerator.java:453)
    at sun.security.provider.SeedGenerator.getSeedBytes(SeedGenerator.java:123)
    at sun.security.provider.SeedGenerator.generateSeed(SeedGenerator.java:118)
    at sun.security.provider.SecureRandom.engineGenerateSeed(SecureRandom.java:114)
    at sun.security.provider.SecureRandom.engineNextBytes(SecureRandom.java:171)
    at java.security.SecureRandom.nextBytes(SecureRandom.java:433)
    at java.security.SecureRandom.next(SecureRandom.java:455)
    at java.util.Random.nextLong(Random.java:284)
    at org.mortbay.jetty.servlet.HashSessionIdManager.doStart(HashSessionIdManager.java:139)
    at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
    at org.mortbay.jetty.servlet.AbstractSessionManager.doStart(AbstractSessionManager.java:168)
    at org.mortbay.jetty.servlet.HashSessionManager.doStart(HashSessionManager.java:67)
    at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
    at org.mortbay.jetty.servlet.SessionHandler.doStart(SessionHandler.java:115)
    at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
    at org.mortbay.jetty.handler.HandlerWrapper.doStart(HandlerWrapper.java:130)
    at org.mortbay.jetty.handler.ContextHandler.startContext(ContextHandler.java:537)
    at org.mortbay.jetty.servlet.Context.startContext(Context.java:136)
    at org.mortbay.jetty.webapp.WebAppContext.startContext(WebAppContext.java:1234)
    at org.mortbay.jetty.handler.ContextHandler.doStart(ContextHandler.java:517)
    at org.mortbay.jetty.webapp.WebAppContext.doStart(WebAppContext.java:460)
    at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
    at org.mortbay.jetty.handler.HandlerCollection.doStart(HandlerCollection.java:152)
    at org.mortbay.jetty.handler.ContextHandlerCollection.doStart(ContextHandlerCollection.java:156)
    at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
    at org.mortbay.jetty.handler.HandlerWrapper.doStart(HandlerWrapper.java:130)
    at org.mortbay.jetty.Server.doStart(Server.java:222)
    at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
    at org.apache.hadoop.http.HttpServer.start(HttpServer.java:618)
    at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:516)
    at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:461)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:396)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1115)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.startHttpServer(NameNode.java:461)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.activate(NameNode.java:405)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:389)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:578)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:571)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1534)
    at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:445)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
    at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
    at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_21z1ppcxui(TestFileAppend4.java:151)
    at org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock(TestFileAppend4.java:150)

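The timeout above never reaches HDFS code: the NameNode's embedded Jetty is seeding its session-ID RNG, and sun.security.provider.SeedGenerator blocks when the slave's /dev/random entropy pool is drained. A minimal probe of that hypothesis, assuming a Linux slave; SeedProbe is an illustrative name, and in practice the property would be passed via ANT_OPTS or a <jvmarg> in build.xml rather than set in test code:

    import java.security.SecureRandom;

    public class SeedProbe {
        public static void main(String[] args) {
            // Assumption: pointing the SUN provider at the non-blocking device
            // avoids the stall; must be set before the first SecureRandom exists.
            System.setProperty("java.security.egd", "file:/dev/./urandom");
            long start = System.nanoTime();
            byte[] seed = new SecureRandom().generateSeed(20); // the call that hangs
            long ms = (System.nanoTime() - start) / 1000000L;
            System.out.println("seeded " + seed.length + " bytes in " + ms + " ms");
        }
    }
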
FAILED: org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
    at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1332)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1350)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
    at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
    at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
    at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_269ddf9xvm(TestFileAppend4.java:222)
    at org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile(TestFileAppend4.java:221)

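A storage directory stays locked only while a NameNode in the same JVM holds its in_use.lock, so this failure is consistent with the timed-out test above leaving its MiniDFSCluster running. A minimal sketch of the usual defense; ClusterTestBase and the cluster field are illustrative names, not taken from TestFileAppend4:

    import junit.framework.TestCase;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public abstract class ClusterTestBase extends TestCase {
        protected MiniDFSCluster cluster;

        @Override
        protected void tearDown() throws Exception {
            try {
                if (cluster != null) {
                    // Releases the storage lock under name1 plus datanode
                    // threads and sockets, even when the test body threw.
                    cluster.shutdown();
                }
            } finally {
                cluster = null;
                super.tearDown();
            }
        }
    }
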
FAILED: org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
    at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
    at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
    at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
    at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
    at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:781)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqql(TestFileConcurrentReader.java:275)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
    at java.io.FileInputStream.open(Native Method)
    at java.io.FileInputStream.<init>(FileInputStream.java:106)
    at java.io.FileInputStream.<init>(FileInputStream.java:66)
    at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
    at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
    at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
    at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
    at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
    at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
    at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
    at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
    at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
    at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
    at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)

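error=24 is EMFILE: the JVM has exhausted its file-descriptor limit, so even opening hdfs-default.xml fails. On a Linux slave the live count can be read from /proc between tests to bisect which suite is leaking streams or sockets. A minimal sketch, Linux-only and with an illustrative class name:

    import java.io.File;

    public class FdWatch {
        // Number of descriptors currently open in this JVM, read from /proc.
        static int openFds() {
            File[] fds = new File("/proc/self/fd").listFiles();
            return fds == null ? -1 : fds.length;
        }

        public static void main(String[] args) {
            // A steadily growing value across tests points at the leak.
            System.out.println("open fds: " + openFds());
        }
    }
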
FAILED: org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
    at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
    at org.apache.hadoop.util.Shell.runCommand(Shell.java:201)
    at org.apache.hadoop.util.Shell.run(Shell.java:183)
    at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:376)
    at org.apache.hadoop.util.Shell.execCommand(Shell.java:462)
    at org.apache.hadoop.util.Shell.execCommand(Shell.java:445)
    at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
    at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
    at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
    at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
    at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
    at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1577)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1555)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1501)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1468)
    at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
    at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
    at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
    at junit.framework.TestCase.runBare(TestCase.java:132)
    at junit.framework.TestResult$1.protect(TestResult.java:110)
    at junit.framework.TestResult.runProtected(TestResult.java:128)
    at junit.framework.TestResult.run(TestResult.java:113)
    at junit.framework.TestCase.run(TestCase.java:124)
    at junit.framework.TestSuite.runTest(TestSuite.java:232)
    at junit.framework.TestSuite.run(TestSuite.java:227)
    at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)
    at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
    at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
    at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
    at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
    at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
    at java.lang.ProcessImpl.start(ProcessImpl.java:65)
    at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)
    ... 34 more

Stack Trace:
java.lang.RuntimeException: Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
    at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
    at org.apache.hadoop.util.Shell.runCommand(Shell.java:201)
    at org.apache.hadoop.util.Shell.run(Shell.java:183)
    at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:376)
    at org.apache.hadoop.util.Shell.execCommand(Shell.java:462)
    at org.apache.hadoop.util.Shell.execCommand(Shell.java:445)
    at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
    at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
    at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
    at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
    at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
    at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1577)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1555)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1501)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1468)
    at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
    at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
    at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
    at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
    at java.lang.ProcessImpl.start(ProcessImpl.java:65)
    at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)
    at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:516)
    at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
    at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
    at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1577)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1555)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1501)
    at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1468)
    at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
    at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
    at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)

FAILED: org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
    at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1332)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1350)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
    at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
    at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)

FAILED: org.apache.hadoop.hdfs.TestLargeBlock.testLargeBlockSize

Error Message:
Premeture EOF from inputStream

Stack Trace:
java.io.IOException: Premeture EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:118)
    at org.apache.hadoop.hdfs.BlockReader.readChunk(BlockReader.java:275)
    at org.apache.hadoop.fs.FSInputChecker.readChecksumChunk(FSInputChecker.java:273)
    at org.apache.hadoop.fs.FSInputChecker.read1(FSInputChecker.java:225)
    at org.apache.hadoop.fs.FSInputChecker.read(FSInputChecker.java:193)
    at org.apache.hadoop.hdfs.BlockReader.read(BlockReader.java:117)
    at org.apache.hadoop.hdfs.DFSInputStream.readBuffer(DFSInputStream.java:477)
    at org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:528)
    at java.io.DataInputStream.readFully(DataInputStream.java:178)
    at org.apache.hadoop.hdfs.TestLargeBlock.checkFullFile(TestLargeBlock.java:142)
    at org.apache.hadoop.hdfs.TestLargeBlock.runTest(TestLargeBlock.java:210)
    at org.apache.hadoop.hdfs.TestLargeBlock.__CLR3_0_2y8q39ovr5(TestLargeBlock.java:171)
    at org.apache.hadoop.hdfs.TestLargeBlock.testLargeBlockSize(TestLargeBlock.java:169)

FAILED: org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
    at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
    at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5brsi(TestBalancer.java:327)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)

FAILED: org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
    at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
    at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancerDefaultConstructor(TestBalancer.java:279)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerDefaultConstructor(TestBalancer.java:376)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_2g13gq9rsr(TestBalancer.java:344)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2(TestBalancer.java:341)

FAILED: org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
    at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
    at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.integrationTest(TestBalancer.java:319)
    at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.__CLR3_0_2wspf0nr50(TestBlockTokenWithDFS.java:529)
    at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End(TestBlockTokenWithDFS.java:526)

FAILED: org.apache.hadoop.hdfs.server.namenode.TestSaveNamespace.testCrashWhileSavingSecondImage

Error Message:
NameNode is not formatted.

Stack Trace:
java.io.IOException: NameNode is not formatted.
    at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:438)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.loadFSImage(FSDirectory.java:149)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.initialize(FSNamesystem.java:306)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:284)
    at org.apache.hadoop.hdfs.server.namenode.TestSaveNamespace.saveNamespaceWithInjectedFault(TestSaveNamespace.java:139)
    at org.apache.hadoop.hdfs.server.namenode.TestSaveNamespace.__CLR3_0_2vpmvlky27(TestSaveNamespace.java:152)
    at org.apache.hadoop.hdfs.server.namenode.TestSaveNamespace.testCrashWhileSavingSecondImage(TestSaveNamespace.java:151)

FAILED: org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 8b52bfbc388439cd3b8db25e665e8e7e but expecting 1d0caa292d40b0b1928e85285448c4f4

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 8b52bfbc388439cd3b8db25e665e8e7e but expecting 1d0caa292d40b0b1928e85285448c4f4
    at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1062)
    at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
    at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
    at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
    at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
    at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4td9(TestStorageRestore.java:316)
    at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)

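The checkpoint aborts because the digest recomputed over the transferred fsimage disagrees with the recorded one. Recomputing the MD5 by hand is a quick way to tell on-disk corruption from a stale recorded checksum; a minimal sketch, where the path argument is whichever fsimage copy is under suspicion:

    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.security.MessageDigest;

    public class ImageDigest {
        public static void main(String[] args) throws Exception {
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            try (InputStream in = new FileInputStream(args[0])) {
                byte[] buf = new byte[8192];
                int n;
                while ((n = in.read(buf)) != -1) {
                    md5.update(buf, 0, n); // digest the image exactly as stored
                }
            }
            StringBuilder hex = new StringBuilder();
            for (byte b : md5.digest()) {
                hex.append(String.format("%02x", b));
            }
            // Compare against the two digests quoted in the failure above.
            System.out.println(hex);
        }
    }
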
FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2lttiju10ws(TestBlockRecovery.java:165)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedReplicas(TestBlockRecovery.java:153)

FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRbwReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2c2lg1h10xa(TestBlockRecovery.java:204)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRbwReplicas(TestBlockRecovery.java:190)

FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRwrReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_29tewcb10xt(TestBlockRecovery.java:243)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRwrReplicas(TestBlockRecovery.java:229)

FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBWReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2cqk51310yc(TestBlockRecovery.java:281)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBWReplicas(TestBlockRecovery.java:269)

FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBW_RWRReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2396azp10yp(TestBlockRecovery.java:305)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBW_RWRReplicas(TestBlockRecovery.java:293)

FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRWRReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2ahdlbx10z1(TestBlockRecovery.java:329)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRWRReplicas(TestBlockRecovery.java:317)