See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/530/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 655556 lines...]
    [junit] 2010-12-23 12:36:36,620 WARN  datanode.FSDatasetAsyncDiskService 
(FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already 
shut down.
    [junit] 2010-12-23 12:36:36,620 INFO  hdfs.MiniDFSCluster 
(MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2010-12-23 12:36:36,735 INFO  ipc.Server (Server.java:stop(1611)) - 
Stopping server on 53369
    [junit] 2010-12-23 12:36:36,736 INFO  ipc.Server (Server.java:run(1444)) - 
IPC Server handler 0 on 53369: exiting
    [junit] 2010-12-23 12:36:36,736 INFO  datanode.DataNode 
(DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads 
is 1
    [junit] 2010-12-23 12:36:36,736 WARN  datanode.DataNode 
(DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:39149, 
storageID=DS-2023999281-127.0.1.1-39149-1293107785734, infoPort=33426, 
ipcPort=53369):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit]     at 
java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit]     at 
sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit]     at 
sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit]     at 
org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit]     at java.lang.Thread.run(Thread.java:619)
    [junit] 
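
The AsynchronousCloseException above is routine shutdown noise, not a failure: MiniDFSCluster closes the DataNode's listening channel while DataXceiverServer is blocked in accept(), and java.nio reports the interrupted accept this way. A hedged standalone demo of the mechanism (plain Java, not Hadoop code):

    import java.net.InetSocketAddress;
    import java.nio.channels.AsynchronousCloseException;
    import java.nio.channels.ServerSocketChannel;

    // Demo: closing a ServerSocketChannel from another thread makes a
    // blocked accept() fail with AsynchronousCloseException, which is how
    // MiniDFSCluster stops the DataXceiverServer accept loop.
    public class AsyncCloseDemo {
        public static void main(String[] args) throws Exception {
            ServerSocketChannel server = ServerSocketChannel.open();
            server.bind(new InetSocketAddress(0));
            Thread closer = new Thread(() -> {
                try {
                    Thread.sleep(200);  // let main block in accept() first
                    server.close();     // interrupts the blocked accept()
                } catch (Exception ignored) { }
            });
            closer.start();
            try {
                server.accept();        // blocks until close() fires
            } catch (AsynchronousCloseException e) {
                System.err.println("accept() interrupted by close: " + e);
            }
            closer.join();
        }
    }
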
    [junit] 2010-12-23 12:36:36,737 INFO  ipc.Server (Server.java:run(675)) - 
Stopping IPC Server Responder
    [junit] 2010-12-23 12:36:36,737 INFO  ipc.Server (Server.java:run(475)) - 
Stopping IPC Server listener on 53369
    [junit] 2010-12-23 12:36:36,738 INFO  datanode.DataNode 
(DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads 
is 0
    [junit] 2010-12-23 12:36:36,783 INFO  datanode.DataBlockScanner 
(DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-23 12:36:36,839 INFO  datanode.DataNode 
(DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:39149, 
storageID=DS-2023999281-127.0.1.1-39149-1293107785734, infoPort=33426, 
ipcPort=53369):Finishing DataNode in: 
FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-23 12:36:36,839 INFO  ipc.Server (Server.java:stop(1611)) - 
Stopping server on 53369
    [junit] 2010-12-23 12:36:36,839 INFO  datanode.DataNode 
(DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads 
is 0
    [junit] 2010-12-23 12:36:36,840 INFO  datanode.FSDatasetAsyncDiskService 
(FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk 
service threads...
    [junit] 2010-12-23 12:36:36,840 INFO  datanode.FSDatasetAsyncDiskService 
(FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads 
have been shut down.
    [junit] 2010-12-23 12:36:36,840 WARN  datanode.FSDatasetAsyncDiskService 
(FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already 
shut down.
    [junit] 2010-12-23 12:36:36,843 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-23 12:36:36,843 WARN  namenode.DecommissionManager 
(DecommissionManager.java:run(70)) - Monitor interrupted: 
java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-23 12:36:36,843 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 2
    [junit] 2010-12-23 12:36:36,845 INFO  ipc.Server (Server.java:stop(1611)) - 
Stopping server on 57567
    [junit] 2010-12-23 12:36:36,845 INFO  ipc.Server (Server.java:run(1444)) - 
IPC Server handler 6 on 57567: exiting
    [junit] 2010-12-23 12:36:36,845 INFO  ipc.Server (Server.java:run(1444)) - 
IPC Server handler 3 on 57567: exiting
    [junit] 2010-12-23 12:36:36,846 INFO  ipc.Server (Server.java:run(1444)) - 
IPC Server handler 7 on 57567: exiting
    [junit] 2010-12-23 12:36:36,845 INFO  ipc.Server (Server.java:run(1444)) - 
IPC Server handler 4 on 57567: exiting
    [junit] 2010-12-23 12:36:36,846 INFO  ipc.Server (Server.java:run(1444)) - 
IPC Server handler 9 on 57567: exiting
    [junit] 2010-12-23 12:36:36,847 INFO  ipc.Server (Server.java:run(1444)) - 
IPC Server handler 1 on 57567: exiting
    [junit] 2010-12-23 12:36:36,845 INFO  ipc.Server (Server.java:run(675)) - 
Stopping IPC Server Responder
    [junit] 2010-12-23 12:36:36,845 INFO  ipc.Server (Server.java:run(1444)) - 
IPC Server handler 0 on 57567: exiting
    [junit] 2010-12-23 12:36:36,847 INFO  ipc.Server (Server.java:run(1444)) - 
IPC Server handler 2 on 57567: exiting
    [junit] 2010-12-23 12:36:36,846 INFO  ipc.Server (Server.java:run(1444)) - 
IPC Server handler 5 on 57567: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.314 sec
    [junit] 2010-12-23 12:36:36,846 INFO  ipc.Server (Server.java:run(1444)) - 
IPC Server handler 8 on 57567: exiting
    [junit] 2010-12-23 12:36:36,846 INFO  ipc.Server (Server.java:run(475)) - 
Stopping IPC Server listener on 57567

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: 
Tests failed!

Total time: 60 minutes 14 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
127.0.0.1:36178 is not an underUtilized node

Stack Trace:
junit.framework.AssertionFailedError: 127.0.0.1:36178 is not an underUtilized node
        at 
org.apache.hadoop.hdfs.server.balancer.Balancer.initNodes(Balancer.java:1012)
        at 
org.apache.hadoop.hdfs.server.balancer.Balancer.initNodes(Balancer.java:954)
        at 
org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1497)
        at 
org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
        at 
org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
        at 
org.apache.hadoop.hdfs.server.balancer.TestBalancer.twoNodeTest(TestBalancer.java:312)
        at 
org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5brym(TestBalancer.java:328)
        at 
org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)
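
The assertion fires while Balancer.initNodes buckets each datanode by its space utilization relative to the cluster average: the test expects 127.0.0.1:36178 to land in the underUtilized bucket and it did not. A minimal sketch of that bucketing, assuming the usual average-plus-or-minus-threshold bands (names and exact boundary handling are illustrative, not copied from Balancer.java):

    // Illustrative bucketing of datanodes by utilization, as the HDFS
    // balancer does before deciding which nodes to move blocks between.
    public class BalancerBuckets {
        enum Bucket { OVER_UTILIZED, ABOVE_AVG, BELOW_AVG, UNDER_UTILIZED }

        static Bucket classify(double nodeUtil, double avgUtil, double threshold) {
            if (nodeUtil > avgUtil + threshold)  return Bucket.OVER_UTILIZED;
            if (nodeUtil > avgUtil)              return Bucket.ABOVE_AVG;
            if (nodeUtil >= avgUtil - threshold) return Bucket.BELOW_AVG;
            return Bucket.UNDER_UTILIZED;        // strictly below avg - threshold
        }

        public static void main(String[] args) {
            // With avg = 50% and a 10% threshold, a 35%-full node is
            // underUtilized, but a 45%-full node is only belowAvgUtilized,
            // which is the kind of boundary this regression is sensitive to.
            System.out.println(classify(35.0, 50.0, 10.0)); // UNDER_UTILIZED
            System.out.println(classify(45.0, 50.0, 10.0)); // BELOW_AVG
        }
    }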


FAILED:  
org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
        at sun.nio.ch.IOUtil.initPipe(Native Method)
        at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
        at 
sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
        at java.nio.channels.Selector.open(Selector.java:209)
        at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
        at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
        at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
        at 
org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
        at 
org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
        at 
org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
        at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
        at 
org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
        at 
org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
        at 
org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
        at 
org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
        at 
org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
        at 
org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
        at 
org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
        at 
org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
        at 
org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
        at 
org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
        at 
org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at 
org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  
org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage 
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1.
 The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage 
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1.
 The directory is already locked.
        at 
org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
        at 
org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
        at 
org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
        at 
org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
        at 
org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
        at 
org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
        at 
org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
        at 
org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at 
org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
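
The lock in question is the exclusive file lock that HDFS Storage takes on an in_use.lock file inside each storage directory; the second MiniDFSCluster cannot format name1, most plausibly because the cluster from the earlier failed setUp was never shut down and still holds the lock. The same collision explains the identical failure below. A sketch of the locking pattern, with the error text modeled on the message above (an illustration, not the Storage.java source):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;
    import java.nio.channels.OverlappingFileLockException;

    // Demo of directory locking via an exclusive lock on "in_use.lock".
    public class StorageLockDemo {
        static FileLock tryLockDir(File storageDir) throws IOException {
            File lockFile = new File(storageDir, "in_use.lock");
            RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
            FileLock lock = raf.getChannel().tryLock(); // null if another process holds it
            if (lock == null) {
                raf.close();
                throw new IOException("Cannot lock storage " + storageDir
                    + ". The directory is already locked.");
            }
            return lock;
        }

        public static void main(String[] args) throws IOException {
            File dir = new File("/tmp/storage-lock-demo");
            dir.mkdirs();
            FileLock first = tryLockDir(dir);       // succeeds
            try {
                tryLockDir(dir);                    // second attempt fails
            } catch (OverlappingFileLockException e) {
                // Same-JVM collision; a separate process would instead see
                // tryLock() return null and get the IOException above.
                System.err.println("Directory already locked: " + e);
            }
            first.release();
        }
    }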


FAILED:  
org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage 
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1.
 The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage 
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1.
 The directory is already locked.
        at 
org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
        at 
org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
        at 
org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
        at 
org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
        at 
org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
        at 
org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
        at 
org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
        at 
org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at 
org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  
org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file 
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage
 is corrupt with MD5 checksum of 35bda2a9eb132b8884e16781b373cc83 but expecting 
c19f5cb617e5823848b8bd2b191ab709

Stack Trace:
java.io.IOException: Image file 
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage
 is corrupt with MD5 checksum of 35bda2a9eb132b8884e16781b373cc83 but expecting 
c19f5cb617e5823848b8bd2b191ab709
        at 
org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
        at 
org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
        at 
org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
        at 
org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
        at 
org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
        at 
org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjd(TestStorageRestore.java:316)
        at 
org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
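
Here the secondary namenode recomputes an MD5 over the checkpoint image it merged and compares it against the digest recorded for that image; a mismatch means the bytes on disk are not the bytes the digest was taken over (a partially written or stale fsimage), so the load is refused. A minimal sketch of that verification step, with hypothetical helper names:

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.security.MessageDigest;

    // Demo: recompute a file's MD5 and refuse to load it on mismatch,
    // producing an error shaped like the one above.
    public class ImageChecksumDemo {
        static String md5Hex(String path) throws Exception {
            MessageDigest md = MessageDigest.getInstance("MD5");
            try (InputStream in = new FileInputStream(path)) {
                byte[] buf = new byte[8192];
                int n;
                while ((n = in.read(buf)) > 0) {
                    md.update(buf, 0, n);
                }
            }
            StringBuilder hex = new StringBuilder();
            for (byte b : md.digest()) {
                hex.append(String.format("%02x", b & 0xff));
            }
            return hex.toString();
        }

        static void verifyImage(String path, String expected) throws Exception {
            String actual = md5Hex(path);
            if (!actual.equals(expected)) {
                throw new IOException("Image file " + path
                    + " is corrupt with MD5 checksum of " + actual
                    + " but expecting " + expected);
            }
        }
    }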


