See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-22-branch/5/
###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 3369 lines...]
[junit] Running org.apache.hadoop.hdfs.TestFiHFlush
[junit] Tests run: 9, Failures: 0, Errors: 0, Time elapsed: 18.358 sec
[junit] Running org.apache.hadoop.hdfs.TestFiHftp
[junit] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 41.517 sec
[junit] Running org.apache.hadoop.hdfs.TestFiPipelines
[junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 5.936 sec
[junit] Running org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol
[junit] Tests run: 29, Failures: 0, Errors: 0, Time elapsed: 220.64 sec
[junit] Running org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2
[junit] Tests run: 10, Failures: 0, Errors: 0, Time elapsed: 439.793 sec
[junit] Running org.apache.hadoop.hdfs.server.datanode.TestFiPipelineClose
[junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.946 sec
checkfailure:
run-test-hdfs-excluding-commit-and-smoke:
[delete] Deleting directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-22-branch/trunk/build-fi/test/data
[mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-22-branch/trunk/build-fi/test/data
[delete] Deleting directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-22-branch/trunk/build-fi/test/logs
[mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-22-branch/trunk/build-fi/test/logs
[junit] WARNING: multiple versions of ant detected in path for junit
[junit] jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
[junit] and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
[junit] Running org.apache.hadoop.fs.TestFiListPath
[junit] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 1.952 sec
[junit] Running org.apache.hadoop.fs.TestFiRename
[junit] Tests run: 4, Failures: 0, Errors: 0, Time elapsed: 6.082 sec
[junit] Running org.apache.hadoop.hdfs.TestFiHFlush
[junit] Tests run: 9, Failures: 0, Errors: 0, Time elapsed: 18.589 sec
[junit] Running org.apache.hadoop.hdfs.TestFiHftp
[junit] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 44.927 sec
[junit] Running org.apache.hadoop.hdfs.TestFiPipelines
[junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 5.904 sec
[junit] Running org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol
[junit] Tests run: 29, Failures: 0, Errors: 0, Time elapsed: 220.621 sec
[junit] Running org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2
[junit] Tests run: 10, Failures: 0, Errors: 0, Time elapsed: 414.015 sec
[junit] Running org.apache.hadoop.hdfs.server.datanode.TestFiPipelineClose
[junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.207 sec
checkfailure:
run-test-hdfs-all-withtestcaseonly:
run-test-hdfs:
BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-22-branch/trunk/build.xml:725: Tests failed!
Total time: 104 minutes 47 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure
###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
REGRESSION: org.apache.hadoop.hdfs.TestHDFSTrash.testTrashEmptier
Error Message:
null
Stack Trace:
junit.framework.AssertionFailedError: null
at org.apache.hadoop.fs.TestTrash.testTrashEmptier(TestTrash.java:473)
at junit.extensions.TestDecorator.basicRun(TestDecorator.java:24)
at junit.extensions.TestSetup$1.protect(TestSetup.java:23)
at junit.extensions.TestSetup.run(TestSetup.java:27)
FAILED: org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer
Error Message:
Too many open files
Stack Trace:
java.io.IOException: Too many open files
at sun.nio.ch.EPollArrayWrapper.epollCreate(Native Method)
at sun.nio.ch.EPollArrayWrapper.<init>(EPollArrayWrapper.java:68)
at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:52)
at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
at java.nio.channels.Selector.open(Selector.java:209)
at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
at org.apache.hadoop.ipc.Server.<init>(Server.java:1501)
at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:394)
at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:331)
at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:291)
at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
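
Note: the "Too many open files" above points at file-descriptor exhaustion. Per the trace, each MiniDFSCluster startup opens epoll selectors and sockets for the DataNode IPC server, and if earlier tests never shut their clusters down, descriptors accumulate until Selector.open fails. A minimal sketch of the setUp/tearDown pattern that avoids the leak (hypothetical test class, not the actual TestFileConcurrentReader code):

    import junit.framework.TestCase;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Hypothetical example, not the code under test: shutting the cluster
    // down in tearDown() releases the DataNode/NameNode sockets and epoll
    // selectors that otherwise accumulate across test runs.
    public class ExampleClusterTest extends TestCase {
      private MiniDFSCluster cluster;

      protected void setUp() throws Exception {
        Configuration conf = new Configuration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      }

      protected void tearDown() throws Exception {
        if (cluster != null) {
          cluster.shutdown(); // closes the IPC Server and its Selector
          cluster = null;
        }
      }
    }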
FAILED: org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite
Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-22-branch/trunk/build/test/data/dfs/name1. The directory is already locked.
Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-22-branch/trunk/build/test/data/dfs/name1. The directory is already locked.
at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1332)
at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1350)
at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1403)
at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:201)
at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
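
Note: this "already locked" failure is Storage$StorageDirectory.lock (Storage.java:615 in the trace) refusing to format a name directory still held by a previous cluster, consistent with the preceding test never reaching shutdown. A rough sketch of the locking scheme, assuming the conventional in_use.lock file name and java.nio file locking (not the actual Storage code):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;

    // Rough sketch of a storage-directory lock. tryLock() returning null
    // means another holder owns the lock, which surfaces as the
    // "Cannot lock storage ... The directory is already locked." error above.
    public class StorageLockSketch {
      public static FileLock lock(File storageDir) throws IOException {
        File lockFile = new File(storageDir, "in_use.lock"); // assumed name
        RandomAccessFile file = new RandomAccessFile(lockFile, "rws");
        FileLock lock = file.getChannel().tryLock();
        if (lock == null) {
          file.close();
          throw new IOException("Cannot lock storage " + storageDir
              + ". The directory is already locked.");
        }
        return lock;
      }
    }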
FAILED: org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore
Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-22-branch/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of a16b74ef257941883d505cb50a11adf1 but expecting 5b91b7e0b8c77e1dcfe06a77f658ad97
Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-22-branch/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of a16b74ef257941883d505cb50a11adf1 but expecting 5b91b7e0b8c77e1dcfe06a77f658ad97
at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1062)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:678)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:583)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:460)
at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:424)
at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm410tm(TestStorageRestore.java:316)
at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
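
Note: per the error message, FSImage.loadFSImage recomputes the MD5 of the fsimage on disk and compares it with the digest recorded when the image was saved, so a mismatch means the secondary's merged image was corrupted or truncated during the checkpoint. A self-contained sketch of that style of check (hypothetical helper, not the FSImage code):

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    // Hypothetical helper showing the shape of the verification: digest the
    // file on disk and compare against the checksum recorded at save time.
    public class ImageChecksumSketch {
      public static String md5Hex(String path)
          throws IOException, NoSuchAlgorithmException {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        FileInputStream in = new FileInputStream(path);
        try {
          byte[] buf = new byte[8192];
          for (int n = in.read(buf); n != -1; n = in.read(buf)) {
            md5.update(buf, 0, n);
          }
        } finally {
          in.close();
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md5.digest()) {
          hex.append(String.format("%02x", b & 0xff));
        }
        return hex.toString();
      }

      public static void verify(String imageFile, String expected)
          throws IOException, NoSuchAlgorithmException {
        String actual = md5Hex(imageFile);
        if (!actual.equals(expected)) {
          throw new IOException("Image file " + imageFile
              + " is corrupt with MD5 checksum of " + actual
              + " but expecting " + expected);
        }
      }
    }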