Is "nfslock" already running?
If not, these steps may help:
    sudo /etc/init.d/portmap start
    sudo /etc/init.d/nfs start
    sudo /etc/init.d/nfslock start
    sudo /sbin/chkconfig --level 12345 portmap on
    sudo /sbin/chkconfig --level 12345 nfs on
    sudo /sbin/chkconfig --level 12345 nfslock on
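You can also confirm the lock manager is actually registered before restarting Hadoop. The NameNode takes the failing lock on <dfs.name.dir>/in_use.lock (the Storage.tryLock frame in your trace), so if locking on the mount is broken these checks should show it. A minimal sketch, assuming RHEL-style tools to match the commands above:

    # nlockmgr (lockd) and status (statd) should both be listed;
    # if nlockmgr is missing, fcntl locks on the mount fail with
    # ENOLCK, which Java surfaces as "No locks available"
    rpcinfo -p | grep -E 'portmapper|nlockmgr|status'

    # same check via the service script
    sudo /sbin/service nfslock status

Run the rpcinfo check on both the client and the NFS server. (Mounting with -o nolock would make tryLock succeed by falling back to client-local locks, but then the lock no longer protects dfs.name.dir from other hosts, so fixing nfslock is the better route.)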

2010/3/4 Zheng Lv <lvzheng19800...@gmail.com>

> Hello Everyone,
>  I added an NFS mount point to the dfs.name.dir configuration option, but
> after that, when I restarted the Hadoop cluster, I got the following:
>    ************************************************************/
> 2010-03-03 18:32:59,708 INFO org.apache.hadoop.ipc.metrics.RpcMetrics:
> Initializing RPC Metrics with hostName=NameNode, port=9000
> 2010-03-03 18:32:59,714 INFO org.apache.hadoop.hdfs.server.namenode.NameNode:
> Namenode up at: cactus207/172.16.1.207:9000
> 2010-03-03 18:32:59,723 INFO org.apache.hadoop.metrics.jvm.JvmMetrics:
> Initializing JVM Metrics with processName=NameNode, sessionId=null
> 2010-03-03 18:32:59,724 INFO org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics:
> Initializing NameNodeMeterics using context object:org.apache.hadoop.metrics.spi.NullContext
> 2010-03-03 18:32:59,798 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem:
> fsOwner=root,root,bin,daemon,sys,adm,disk,wheel
> 2010-03-03 18:32:59,798 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem:
> supergroup=supergroup
> 2010-03-03 18:32:59,798 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem:
> isPermissionEnabled=true
> 2010-03-03 18:32:59,805 INFO org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics:
> Initializing FSNamesystemMetrics using context object:org.apache.hadoop.metrics.spi.NullContext
> 2010-03-03 18:32:59,806 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem:
> Registered FSNamesystemStatusMBean
> 2010-03-03 18:33:29,856 INFO org.apache.hadoop.hdfs.server.common.Storage:
> java.io.IOException: No locks available
>        at sun.nio.ch.FileChannelImpl.lock0(Native Method)
>        at sun.nio.ch.FileChannelImpl.tryLock(FileChannelImpl.java:879)
>        at java.nio.channels.FileChannel.tryLock(FileChannel.java:962)
>        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.tryLock(Storage.java:527)
>        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:505)
>        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.analyzeStorage(Storage.java:363)
>        at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:285)
>        at org.apache.hadoop.hdfs.server.namenode.FSDirectory.loadFSImage(FSDirectory.java:87)
>        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.initialize(FSNamesystem.java:311)
>        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:292)
>        at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:201)
>        at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:279)
>        at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:956)
>        at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:965)
> 2010-03-03 18:33:29,858 ERROR org.apache.hadoop.hdfs.server.namenode.FSNamesystem:
> FSNamesystem initialization failed.
> java.io.IOException: No locks available
>        [same stack trace as above]
> 2010-03-03 18:33:29,859 INFO org.apache.hadoop.ipc.Server: Stopping server on 9000
> 2010-03-03 18:33:29,859 ERROR org.apache.hadoop.hdfs.server.namenode.NameNode:
> java.io.IOException: No locks available
>        [same stack trace as above]
> 2010-03-03 18:33:29,861 INFO
> org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG:
> /************************************************************
> SHUTDOWN_MSG: Shutting down NameNode at cactus207/172.16.1.207
> ************************************************************/
> [A second start attempt at 18:41:32 logged the identical startup sequence and
> failed the same way at 18:42:02: java.io.IOException: No locks available with
> the same stack trace, followed by the same SHUTDOWN_MSG.]
>  When I removed the NFS mount from the configuration and restarted the
> Hadoop cluster again, it started up normally.
>  Any ideas?
>  Thanks a lot.
>  LvZheng.
>



-- 
方阳 (Fang Yang)
Backend Engineer
Douban Inc.
msn:franklin.f...@hotmail.com
gtalk:franklin.f...@gmail.com
skype:franklin.fang
Room 2016, Gate 1, Zone A1, Building 51, 14 Jiuxianqiao Road, Beijing 100016
