See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/508/
###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 855880 lines...]
[junit] 2010-12-02 16:33:27,065 INFO net.NetworkTopology (NetworkTopology.java:add(331)) - Adding a new node: /default-rack/127.0.0.1:34336
[junit] 2010-12-02 16:33:27,069 INFO datanode.DataNode (DataNode.java:register(697)) - New storage id DS-1896102694-127.0.1.1-34336-1291307607063 is assigned to data-node 127.0.0.1:34336
[junit] 2010-12-02 16:33:27,070 INFO datanode.DataNode (DataNode.java:run(1421)) - DatanodeRegistration(127.0.0.1:34336, storageID=DS-1896102694-127.0.1.1-34336-1291307607063, infoPort=51649, ipcPort=59557)In DataNode.run, data = FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}
[junit] 2010-12-02 16:33:27,070 INFO ipc.Server (Server.java:run(608)) - IPC Server Responder: starting
[junit] 2010-12-02 16:33:27,070 INFO datanode.DataNode (DataNode.java:offerService(887)) - using BLOCKREPORT_INTERVAL of 21600000msec Initial delay: 0msec
[junit] 2010-12-02 16:33:27,070 INFO ipc.Server (Server.java:run(1369)) - IPC Server handler 0 on 59557: starting
[junit] 2010-12-02 16:33:27,070 INFO ipc.Server (Server.java:run(443)) - IPC Server listener on 59557: starting
[junit] 2010-12-02 16:33:27,076 INFO datanode.DataNode (DataNode.java:blockReport(1126)) - BlockReport of 0 blocks got processed in 2 msecs
[junit] 2010-12-02 16:33:27,076 INFO datanode.DataNode (DataNode.java:offerService(929)) - Starting Periodic block scanner.
[junit] 2010-12-02 16:33:27,088 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=create src=/pipeline_Fi_43/foo dst=null perm=hudson:supergroup:rw-r--r--
[junit] 2010-12-02 16:33:27,089 INFO hdfs.DFSClientAspects (DFSClientAspects.aj:ajc$before$org_apache_hadoop_hdfs_DFSClientAspects$5$5ba7280d(86)) - FI: before pipelineClose:
[junit] 2010-12-02 16:33:27,091 INFO hdfs.StateChange (FSNamesystem.java:allocateBlock(1753)) - BLOCK* NameSystem.allocateBlock: /pipeline_Fi_43/foo. blk_-5987900848781850642_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:53619|RBW], ReplicaUnderConstruction[127.0.0.1:34336|RBW], ReplicaUnderConstruction[127.0.0.1:39706|RBW]]}
[junit] 2010-12-02 16:33:27,092 INFO protocol.ClientProtocolAspects (ClientProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_protocol_ClientProtocolAspects$1$7076326d(35)) - FI: addBlock Pipeline[127.0.0.1:53619, 127.0.0.1:34336, 127.0.0.1:39706]
[junit] 2010-12-02 16:33:27,093 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:53619
[junit] 2010-12-02 16:33:27,093 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
[junit] 2010-12-02 16:33:27,093 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(257)) - Receiving block blk_-5987900848781850642_1001 src: /127.0.0.1:35600 dest: /127.0.0.1:53619
[junit] 2010-12-02 16:33:27,095 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:34336
[junit] 2010-12-02 16:33:27,095 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
[junit] 2010-12-02 16:33:27,095 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(257)) - Receiving block blk_-5987900848781850642_1001 src: /127.0.0.1:54758 dest: /127.0.0.1:34336
[junit] 2010-12-02 16:33:27,097 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:39706
[junit] 2010-12-02 16:33:27,097 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
[junit] 2010-12-02 16:33:27,097 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(257)) - Receiving block blk_-5987900848781850642_1001 src: /127.0.0.1:50309 dest: /127.0.0.1:39706
[junit] 2010-12-02 16:33:27,098 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:34336
[junit] 2010-12-02 16:33:27,098 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$1$4c211928(53)) - FI: callReceivePacket, datanode=127.0.0.1:39706
[junit] 2010-12-02 16:33:27,098 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$1$4c211928(53)) - FI: callReceivePacket, datanode=127.0.0.1:34336
[junit] 2010-12-02 16:33:27,098 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:53619
[junit] 2010-12-02 16:33:27,099 INFO hdfs.DFSClientAspects (DFSClientAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_DFSClientAspects$2$9396d2df(48)) - FI: after pipelineInitNonAppend: hasError=false errorIndex=-1
[junit] 2010-12-02 16:33:27,099 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$1$4c211928(53)) - FI: callReceivePacket, datanode=127.0.0.1:53619
[junit] 2010-12-02 16:33:27,100 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$1$4c211928(53)) - FI: callReceivePacket, datanode=127.0.0.1:53619
[junit] 2010-12-02 16:33:27,100 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$2$56c32214(71)) - FI: callWritePacketToDisk
[junit] 2010-12-02 16:33:27,100 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$1$4c211928(53)) - FI: callReceivePacket, datanode=127.0.0.1:34336
[junit] 2010-12-02 16:33:27,100 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$1$4c211928(53)) - FI: callReceivePacket, datanode=127.0.0.1:53619
[junit] 2010-12-02 16:33:27,100 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$2$56c32214(71)) - FI: callWritePacketToDisk
[junit] 2010-12-02 16:33:27,101 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$1$4c211928(53)) - FI: callReceivePacket, datanode=127.0.0.1:39706
[junit] 2010-12-02 16:33:27,101 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$2$56c32214(71)) - FI: callWritePacketToDisk
[junit] 2010-12-02 16:33:27,101 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$1$4c211928(53)) - FI: callReceivePacket, datanode=127.0.0.1:34336
[junit] 2010-12-02 16:33:27,101 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$8$9594fb70(205)) - FI: fiPipelineAck, datanode=DatanodeRegistration(127.0.0.1:34336, storageID=DS-1896102694-127.0.1.1-34336-1291307607063, infoPort=51649, ipcPort=59557)
[junit] 2010-12-02 16:33:27,101 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$1$4c211928(53)) - FI: callReceivePacket, datanode=127.0.0.1:39706
[junit] 2010-12-02 16:33:27,102 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$8$9594fb70(205)) - FI: fiPipelineAck, datanode=DatanodeRegistration(127.0.0.1:53619, storageID=DS-2014057751-127.0.1.1-53619-1291307606796, infoPort=47703, ipcPort=49272)
[junit] 2010-12-02 16:33:27,102 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$1$4c211928(53)) - FI: callReceivePacket, datanode=127.0.0.1:53619
[junit] 2010-12-02 16:33:27,102 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(184)) - FI: pipelineClose, datanode=127.0.0.1:53619, offsetInBlock=1, seqno=1, lastPacketInBlock=true, len=0, endOfHeader=25
[junit] 2010-12-02 16:33:27,103 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$1$4c211928(53)) - FI: callReceivePacket, datanode=127.0.0.1:34336
[junit] 2010-12-02 16:33:27,103 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(184)) - FI: pipelineClose, datanode=127.0.0.1:34336, offsetInBlock=1, seqno=1, lastPacketInBlock=true, len=0, endOfHeader=25
[junit] 2010-12-02 16:33:27,103 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$1$4c211928(53)) - FI: callReceivePacket, datanode=127.0.0.1:39706
[junit] 2010-12-02 16:33:27,103 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(184)) - FI: pipelineClose, datanode=127.0.0.1:39706, offsetInBlock=1, seqno=1, lastPacketInBlock=true, len=0, endOfHeader=25
[junit] 2010-12-02 16:33:27,104 INFO fi.FiTestUtil (DataTransferTestUtil.java:run(344)) - FI: SleepAction:pipeline_Fi_43, index=2, duration=[3000, 3001), datanode=127.0.0.1:39706
[junit] 2010-12-02 16:33:27,104 INFO fi.FiTestUtil (FiTestUtil.java:sleep(92)) - DataXceiver for client /127.0.0.1:50309 [Receiving block blk_-5987900848781850642_1001 client=DFSClient_804440136] sleeps for 3000ms
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Error updating JIRA issues. Saving issues for next build.
com.atlassian.jira.rpc.exception.RemotePermissionException: This issue does not exist or you don't have permission to view it.
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure
###################################################################################
############################## FAILED TESTS (if any) ##############################
15 tests failed.

FAILED: TEST-org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol.xml.<init>

Error Message:

Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/TEST-org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol.xml was length 0

REGRESSION: org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08

Error Message:
Was waiting too long for a replica to become TEMPORARY

Stack Trace:
junit.framework.AssertionFailedError: Was waiting too long for a replica to become TEMPORARY
    at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.waitForTempReplica(TestBlockReport.java:514)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2j2e00jqbq(TestBlockReport.java:408)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08(TestBlockReport.java:390)

FAILED: org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock

Error Message:
test timed out after 60000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
    at java.io.FileInputStream.readBytes(Native Method)
    at java.io.FileInputStream.read(FileInputStream.java:199)
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:256)
    at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:218)
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:258)
    at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
    at sun.security.provider.SeedGenerator$URLSeedGenerator.getSeedByte(SeedGenerator.java:453)
    at sun.security.provider.SeedGenerator.getSeedBytes(SeedGenerator.java:123)
    at sun.security.provider.SeedGenerator.generateSeed(SeedGenerator.java:118)
    at sun.security.provider.SecureRandom.engineGenerateSeed(SecureRandom.java:114)
    at sun.security.provider.SecureRandom.engineNextBytes(SecureRandom.java:171)
    at java.security.SecureRandom.nextBytes(SecureRandom.java:433)
    at java.security.SecureRandom.next(SecureRandom.java:455)
    at java.util.Random.nextLong(Random.java:284)
    at org.mortbay.jetty.servlet.HashSessionIdManager.doStart(HashSessionIdManager.java:139)
    at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
    at org.mortbay.jetty.servlet.AbstractSessionManager.doStart(AbstractSessionManager.java:168)
    at org.mortbay.jetty.servlet.HashSessionManager.doStart(HashSessionManager.java:67)
    at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
    at org.mortbay.jetty.servlet.SessionHandler.doStart(SessionHandler.java:115)
    at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
    at org.mortbay.jetty.handler.HandlerWrapper.doStart(HandlerWrapper.java:130)
    at org.mortbay.jetty.handler.ContextHandler.startContext(ContextHandler.java:537)
    at org.mortbay.jetty.servlet.Context.startContext(Context.java:136)
    at org.mortbay.jetty.webapp.WebAppContext.startContext(WebAppContext.java:1234)
    at org.mortbay.jetty.handler.ContextHandler.doStart(ContextHandler.java:517)
    at org.mortbay.jetty.webapp.WebAppContext.doStart(WebAppContext.java:460)
    at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
    at org.mortbay.jetty.handler.HandlerCollection.doStart(HandlerCollection.java:152)
    at org.mortbay.jetty.handler.ContextHandlerCollection.doStart(ContextHandlerCollection.java:156)
    at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
    at org.mortbay.jetty.handler.HandlerWrapper.doStart(HandlerWrapper.java:130)
    at org.mortbay.jetty.Server.doStart(Server.java:222)
    at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
    at org.apache.hadoop.http.HttpServer.start(HttpServer.java:618)
    at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:516)
    at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:461)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:396)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1115)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.startHttpServer(NameNode.java:461)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.activate(NameNode.java:405)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:389)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:578)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:571)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1534)
    at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:445)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
    at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
    at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_21z1ppcxud(TestFileAppend4.java:151)
    at org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock(TestFileAppend4.java:150)

FAILED: org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
    at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1332)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1350)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
    at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
    at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
    at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
    at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_269ddf9xvh(TestFileAppend4.java:222)
    at org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile(TestFileAppend4.java:221)

FAILED: org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
    at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
    at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
    at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
    at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
    at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:781)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqqo(TestFileConcurrentReader.java:275)
    at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
    at java.io.FileInputStream.open(Native Method)
    at java.io.FileInputStream.<init>(FileInputStream.java:106)
    at java.io.FileInputStream.<init>(FileInputStream.java:66)
    at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
    at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
    at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
    at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
    at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
    at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
    at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
    at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
    at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
    at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
    at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)

FAILED: org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
    at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
    at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5brsl(TestBalancer.java:327)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)

FAILED: org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
    at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
    at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancerDefaultConstructor(TestBalancer.java:279)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerDefaultConstructor(TestBalancer.java:376)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_2g13gq9rsu(TestBalancer.java:344)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2(TestBalancer.java:341)

FAILED: org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
    at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
    at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
    at org.apache.hadoop.hdfs.server.balancer.TestBalancer.integrationTest(TestBalancer.java:319)
    at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.__CLR3_0_2wspf0nr53(TestBlockTokenWithDFS.java:529)
    at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End(TestBlockTokenWithDFS.java:526)

FAILED: org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 881927486e0bcbbabc709cc7fe4a41f0 but expecting bbcc683027935545878d892b4b7981cf

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 881927486e0bcbbabc709cc7fe4a41f0 but expecting bbcc683027935545878d892b4b7981cf
    at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1062)
    at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
    at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
    at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
    at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
    at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tdc(TestStorageRestore.java:316)
    at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)

FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2lttiju10wk(TestBlockRecovery.java:165)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedReplicas(TestBlockRecovery.java:153)

FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRbwReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2c2lg1h10x2(TestBlockRecovery.java:204)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRbwReplicas(TestBlockRecovery.java:190)

FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRwrReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_29tewcb10xl(TestBlockRecovery.java:243)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRwrReplicas(TestBlockRecovery.java:229)

FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBWReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2cqk51310y4(TestBlockRecovery.java:281)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBWReplicas(TestBlockRecovery.java:269)

FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBW_RWRReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2396azp10yh(TestBlockRecovery.java:305)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBW_RWRReplicas(TestBlockRecovery.java:293)

FAILED: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRWRReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2ahdlbx10yt(TestBlockRecovery.java:329)
    at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRWRReplicas(TestBlockRecovery.java:317)