See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/553/
###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 642375 lines...]
[junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
[junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
[junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
[junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
[junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
[junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
[junit] at java.lang.Thread.run(Thread.java:619)
[junit] Caused by: java.lang.InterruptedException: sleep interrupted
[junit] at java.lang.Thread.sleep(Native Method)
[junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
[junit] ... 11 more
[junit] 2011-01-15 12:42:07,178 INFO datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
[junit] 2011-01-15 12:42:07,278 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
[junit] 2011-01-15 12:42:07,279 INFO datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:39238, storageID=DS-234553502-127.0.1.1-39238-1295095316012, infoPort=55922, ipcPort=37784):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
[junit] 2011-01-15 12:42:07,279 INFO ipc.Server (Server.java:stop(1611)) - Stopping server on 37784
[junit] 2011-01-15 12:42:07,279 INFO datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
[junit] 2011-01-15 12:42:07,279 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
[junit] 2011-01-15 12:42:07,280 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
[junit] 2011-01-15 12:42:07,280 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
[junit] 2011-01-15 12:42:07,384 WARN namenode.FSNamesystem (FSNamesystem.java:run(2828)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
[junit] 2011-01-15 12:42:07,384 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
[junit] 2011-01-15 12:42:07,385 INFO namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 2
[junit] 2011-01-15 12:42:07,386 INFO ipc.Server (Server.java:stop(1611)) - Stopping server on 39706
[junit] 2011-01-15 12:42:07,386 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 39706: exiting
[junit] 2011-01-15 12:42:07,386 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 39706: exiting
[junit] 2011-01-15 12:42:07,387 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
[junit] 2011-01-15 12:42:07,387 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 39706: exiting
[junit] 2011-01-15 12:42:07,387 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 39706: exiting
[junit] 2011-01-15 12:42:07,388 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 39706: exiting
[junit] 2011-01-15 12:42:07,388 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 39706: exiting
[junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.759 sec
[junit] 2011-01-15 12:42:07,387 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 39706
[junit] 2011-01-15 12:42:07,387 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 39706: exiting
[junit] 2011-01-15 12:42:07,387 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 39706: exiting
[junit] 2011-01-15 12:42:07,387 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 39706: exiting
[junit] 2011-01-15 12:42:07,387 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 39706: exiting
checkfailure:
-run-test-hdfs-fault-inject-withtestcaseonly:
run-test-hdfs-fault-inject:
BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!
Total time: 69 minutes 24 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure
###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.

FAILED: org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)

FAILED: org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)

FAILED: org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
at sun.nio.ch.IOUtil.initPipe(Native Method)
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
at java.nio.channels.Selector.open(Selector.java:209)
at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:421)
at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:512)
at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)

FAILED: org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of f9b6bde45cd1101ea2571eb68070707b but expecting 2970f17e638fc2cf7e05c80ceba530c5

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of f9b6bde45cd1101ea2571eb68070707b but expecting 2970f17e638fc2cf7e05c80ceba530c5
at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tkw(TestStorageRestore.java:316)
at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)