[jira] [Commented] (HDFS-8093) BP does not exist or is not under Constructionnull
[ https://issues.apache.org/jira/browse/HDFS-8093?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=15435275#comment-15435275 ] Tsz Wo Nicholas Sze commented on HDFS-8093: --- Then your cluster probably doesn't have HDFS-9365 which was committed to 2.7.3. > BP does not exist or is not under Constructionnull > -- > > Key: HDFS-8093 > URL: https://issues.apache.org/jira/browse/HDFS-8093 > Project: Hadoop HDFS > Issue Type: Bug > Components: balancer & mover >Affects Versions: 2.6.0 > Environment: Centos 6.5 >Reporter: LINTE > > HDFS balancer run during several hours blancing blocs beetween datanode, it > ended by failing with the following error. > getStoredBlock function return a null BlockInfo. > java.io.IOException: Bad response ERROR for block > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 from > datanode 192.168.0.18:1004 > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer$ResponseProcessor.run(DFSOutputStream.java:897) > 15/04/08 05:52:51 WARN hdfs.DFSClient: Error Recovery for block > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 in pipeline > 192.168.0.63:1004, 192.168.0.1:1004, 192.168.0.18:1004: bad datanode > 192.168.0.18:1004 > 15/04/08 05:52:51 WARN hdfs.DFSClient: DataStreamer Exception > org.apache.hadoop.ipc.RemoteException(java.io.IOException): > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not > exist or is not under Constructionnull > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) > at > org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:717) > at > org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:931) > at > 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) > at > org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619) > at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962) > at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2039) > at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2035) > at java.security.AccessController.doPrivileged(Native Method) > at javax.security.auth.Subject.doAs(Subject.java:415) > at > org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628) > at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2033) > at org.apache.hadoop.ipc.Client.call(Client.java:1468) > at org.apache.hadoop.ipc.Client.call(Client.java:1399) > at > org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:232) > at com.sun.proxy.$Proxy11.updateBlockForPipeline(Unknown Source) > at > org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:877) > at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) > at > sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) > at > sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) > at java.lang.reflect.Method.invoke(Method.java:606) > at > org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187) > at > org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102) > at com.sun.proxy.$Proxy12.updateBlockForPipeline(Unknown Source) > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.setupPipelineForAppendOrRecovery(DFSOutputStream.java:1266) > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.processDatanodeError(DFSOutputStream.java:1004) > at > 
org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:548) > 15/04/08 05:52:51 ERROR hdfs.DFSClient: Failed to close inode 19801755 > org.apache.hadoop.ipc.RemoteException(java.io.IOException): > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not > exist or is not under Constructionnull > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) > at >
[jira] [Commented] (HDFS-8093) BP does not exist or is not under Constructionnull
[ https://issues.apache.org/jira/browse/HDFS-8093?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=15435064#comment-15435064 ] Max Schmidt commented on HDFS-8093: --- I am still facing this issue on my namenode (just happened once while creating a file with a Java client), from my namenode.log: {code} java.io.IOException: BP-1876130894-10.5.0.4-1469019082320:blk_1073787208_63449 does not exist or is not under Constructionnull at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6238) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6305) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:804) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:955) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:969) at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2049) at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2045) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2043) {code} I am using Hadoop 2.7.1 with the corresponding Java libraries. 
> BP does not exist or is not under Constructionnull > -- > > Key: HDFS-8093 > URL: https://issues.apache.org/jira/browse/HDFS-8093 > Project: Hadoop HDFS > Issue Type: Bug > Components: balancer & mover >Affects Versions: 2.6.0 > Environment: Centos 6.5 >Reporter: LINTE > > HDFS balancer run during several hours blancing blocs beetween datanode, it > ended by failing with the following error. > getStoredBlock function return a null BlockInfo. > java.io.IOException: Bad response ERROR for block > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 from > datanode 192.168.0.18:1004 > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer$ResponseProcessor.run(DFSOutputStream.java:897) > 15/04/08 05:52:51 WARN hdfs.DFSClient: Error Recovery for block > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 in pipeline > 192.168.0.63:1004, 192.168.0.1:1004, 192.168.0.18:1004: bad datanode > 192.168.0.18:1004 > 15/04/08 05:52:51 WARN hdfs.DFSClient: DataStreamer Exception > org.apache.hadoop.ipc.RemoteException(java.io.IOException): > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not > exist or is not under Constructionnull > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) > at > org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:717) > at > org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:931) > at > org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) > at > org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619) > at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962) > at 
org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2039) > at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2035) > at java.security.AccessController.doPrivileged(Native Method) > at javax.security.auth.Subject.doAs(Subject.java:415) > at > org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628) > at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2033) > at org.apache.hadoop.ipc.Client.call(Client.java:1468) > at org.apache.hadoop.ipc.Client.call(Client.java:1399) > at > org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:232) > at com.sun.proxy.$Proxy11.updateBlockForPipeline(Unknown Source) > at > org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:877) > at
[jira] [Commented] (HDFS-8093) BP does not exist or is not under Constructionnull
[ https://issues.apache.org/jira/browse/HDFS-8093?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=14986929#comment-14986929 ] Xiaoyu Yao commented on HDFS-8093: -- [~Alexandre LINTE], thanks for the confirmation!. This one can be resolved as a dup of HDFS-9364 [~szetszwo] is working on. > BP does not exist or is not under Constructionnull > -- > > Key: HDFS-8093 > URL: https://issues.apache.org/jira/browse/HDFS-8093 > Project: Hadoop HDFS > Issue Type: Bug > Components: balancer & mover >Affects Versions: 2.6.0 > Environment: Centos 6.5 >Reporter: LINTE > > HDFS balancer run during several hours blancing blocs beetween datanode, it > ended by failing with the following error. > getStoredBlock function return a null BlockInfo. > java.io.IOException: Bad response ERROR for block > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 from > datanode 192.168.0.18:1004 > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer$ResponseProcessor.run(DFSOutputStream.java:897) > 15/04/08 05:52:51 WARN hdfs.DFSClient: Error Recovery for block > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 in pipeline > 192.168.0.63:1004, 192.168.0.1:1004, 192.168.0.18:1004: bad datanode > 192.168.0.18:1004 > 15/04/08 05:52:51 WARN hdfs.DFSClient: DataStreamer Exception > org.apache.hadoop.ipc.RemoteException(java.io.IOException): > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not > exist or is not under Constructionnull > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) > at > org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:717) > at > org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:931) > at > 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) > at > org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619) > at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962) > at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2039) > at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2035) > at java.security.AccessController.doPrivileged(Native Method) > at javax.security.auth.Subject.doAs(Subject.java:415) > at > org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628) > at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2033) > at org.apache.hadoop.ipc.Client.call(Client.java:1468) > at org.apache.hadoop.ipc.Client.call(Client.java:1399) > at > org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:232) > at com.sun.proxy.$Proxy11.updateBlockForPipeline(Unknown Source) > at > org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:877) > at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) > at > sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) > at > sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) > at java.lang.reflect.Method.invoke(Method.java:606) > at > org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187) > at > org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102) > at com.sun.proxy.$Proxy12.updateBlockForPipeline(Unknown Source) > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.setupPipelineForAppendOrRecovery(DFSOutputStream.java:1266) > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.processDatanodeError(DFSOutputStream.java:1004) > at > 
org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:548) > 15/04/08 05:52:51 ERROR hdfs.DFSClient: Failed to close inode 19801755 > org.apache.hadoop.ipc.RemoteException(java.io.IOException): > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not > exist or is not under Constructionnull > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) > at >
[jira] [Commented] (HDFS-8093) BP does not exist or is not under Constructionnull
[ https://issues.apache.org/jira/browse/HDFS-8093?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=14986927#comment-14986927 ] LINTE commented on HDFS-8093: - No more mistakes for me with secure hadoop 2.7.1 and namenode HA settings for balancer. > BP does not exist or is not under Constructionnull > -- > > Key: HDFS-8093 > URL: https://issues.apache.org/jira/browse/HDFS-8093 > Project: Hadoop HDFS > Issue Type: Bug > Components: balancer & mover >Affects Versions: 2.6.0 > Environment: Centos 6.5 >Reporter: LINTE > > HDFS balancer run during several hours blancing blocs beetween datanode, it > ended by failing with the following error. > getStoredBlock function return a null BlockInfo. > java.io.IOException: Bad response ERROR for block > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 from > datanode 192.168.0.18:1004 > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer$ResponseProcessor.run(DFSOutputStream.java:897) > 15/04/08 05:52:51 WARN hdfs.DFSClient: Error Recovery for block > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 in pipeline > 192.168.0.63:1004, 192.168.0.1:1004, 192.168.0.18:1004: bad datanode > 192.168.0.18:1004 > 15/04/08 05:52:51 WARN hdfs.DFSClient: DataStreamer Exception > org.apache.hadoop.ipc.RemoteException(java.io.IOException): > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not > exist or is not under Constructionnull > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) > at > org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:717) > at > org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:931) > at > 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) > at > org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619) > at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962) > at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2039) > at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2035) > at java.security.AccessController.doPrivileged(Native Method) > at javax.security.auth.Subject.doAs(Subject.java:415) > at > org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628) > at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2033) > at org.apache.hadoop.ipc.Client.call(Client.java:1468) > at org.apache.hadoop.ipc.Client.call(Client.java:1399) > at > org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:232) > at com.sun.proxy.$Proxy11.updateBlockForPipeline(Unknown Source) > at > org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:877) > at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) > at > sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) > at > sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) > at java.lang.reflect.Method.invoke(Method.java:606) > at > org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187) > at > org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102) > at com.sun.proxy.$Proxy12.updateBlockForPipeline(Unknown Source) > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.setupPipelineForAppendOrRecovery(DFSOutputStream.java:1266) > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.processDatanodeError(DFSOutputStream.java:1004) > at > 
org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:548) > 15/04/08 05:52:51 ERROR hdfs.DFSClient: Failed to close inode 19801755 > org.apache.hadoop.ipc.RemoteException(java.io.IOException): > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not > exist or is not under Constructionnull > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) > at > org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:717) > at >
[jira] [Commented] (HDFS-8093) BP does not exist or is not under Constructionnull
[ https://issues.apache.org/jira/browse/HDFS-8093?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=14986922#comment-14986922 ] Xiaoyu Yao commented on HDFS-8093: -- [~fborchers], do you have namenode HA setup? If yes, does it still repro when you explicitly specify active NN as follows? {code} hdfs balancer -fs hdfs://activeNN:8020 -threshold 5 {code} > BP does not exist or is not under Constructionnull > -- > > Key: HDFS-8093 > URL: https://issues.apache.org/jira/browse/HDFS-8093 > Project: Hadoop HDFS > Issue Type: Bug > Components: balancer & mover >Affects Versions: 2.6.0 > Environment: Centos 6.5 >Reporter: LINTE > > HDFS balancer run during several hours blancing blocs beetween datanode, it > ended by failing with the following error. > getStoredBlock function return a null BlockInfo. > java.io.IOException: Bad response ERROR for block > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 from > datanode 192.168.0.18:1004 > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer$ResponseProcessor.run(DFSOutputStream.java:897) > 15/04/08 05:52:51 WARN hdfs.DFSClient: Error Recovery for block > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 in pipeline > 192.168.0.63:1004, 192.168.0.1:1004, 192.168.0.18:1004: bad datanode > 192.168.0.18:1004 > 15/04/08 05:52:51 WARN hdfs.DFSClient: DataStreamer Exception > org.apache.hadoop.ipc.RemoteException(java.io.IOException): > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not > exist or is not under Constructionnull > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) > at > org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:717) > at > 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:931) > at > org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) > at > org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619) > at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962) > at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2039) > at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2035) > at java.security.AccessController.doPrivileged(Native Method) > at javax.security.auth.Subject.doAs(Subject.java:415) > at > org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628) > at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2033) > at org.apache.hadoop.ipc.Client.call(Client.java:1468) > at org.apache.hadoop.ipc.Client.call(Client.java:1399) > at > org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:232) > at com.sun.proxy.$Proxy11.updateBlockForPipeline(Unknown Source) > at > org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:877) > at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) > at > sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) > at > sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) > at java.lang.reflect.Method.invoke(Method.java:606) > at > org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187) > at > org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102) > at com.sun.proxy.$Proxy12.updateBlockForPipeline(Unknown Source) > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.setupPipelineForAppendOrRecovery(DFSOutputStream.java:1266) > at > 
org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.processDatanodeError(DFSOutputStream.java:1004) > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:548) > 15/04/08 05:52:51 ERROR hdfs.DFSClient: Failed to close inode 19801755 > org.apache.hadoop.ipc.RemoteException(java.io.IOException): > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not > exist or is not under Constructionnull > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) > at >
[jira] [Commented] (HDFS-8093) BP does not exist or is not under Constructionnull
[ https://issues.apache.org/jira/browse/HDFS-8093?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=14986930#comment-14986930 ] Xiaoyu Yao commented on HDFS-8093: -- It should be HDFS-9365. I can't edit my previous comments after it is posted. > BP does not exist or is not under Constructionnull > -- > > Key: HDFS-8093 > URL: https://issues.apache.org/jira/browse/HDFS-8093 > Project: Hadoop HDFS > Issue Type: Bug > Components: balancer & mover >Affects Versions: 2.6.0 > Environment: Centos 6.5 >Reporter: LINTE > > HDFS balancer run during several hours blancing blocs beetween datanode, it > ended by failing with the following error. > getStoredBlock function return a null BlockInfo. > java.io.IOException: Bad response ERROR for block > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 from > datanode 192.168.0.18:1004 > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer$ResponseProcessor.run(DFSOutputStream.java:897) > 15/04/08 05:52:51 WARN hdfs.DFSClient: Error Recovery for block > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 in pipeline > 192.168.0.63:1004, 192.168.0.1:1004, 192.168.0.18:1004: bad datanode > 192.168.0.18:1004 > 15/04/08 05:52:51 WARN hdfs.DFSClient: DataStreamer Exception > org.apache.hadoop.ipc.RemoteException(java.io.IOException): > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not > exist or is not under Constructionnull > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) > at > org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:717) > at > org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:931) > at > 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) > at > org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619) > at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962) > at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2039) > at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2035) > at java.security.AccessController.doPrivileged(Native Method) > at javax.security.auth.Subject.doAs(Subject.java:415) > at > org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628) > at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2033) > at org.apache.hadoop.ipc.Client.call(Client.java:1468) > at org.apache.hadoop.ipc.Client.call(Client.java:1399) > at > org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:232) > at com.sun.proxy.$Proxy11.updateBlockForPipeline(Unknown Source) > at > org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:877) > at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) > at > sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) > at > sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) > at java.lang.reflect.Method.invoke(Method.java:606) > at > org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187) > at > org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102) > at com.sun.proxy.$Proxy12.updateBlockForPipeline(Unknown Source) > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.setupPipelineForAppendOrRecovery(DFSOutputStream.java:1266) > at > org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.processDatanodeError(DFSOutputStream.java:1004) > at > 
org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:548) > 15/04/08 05:52:51 ERROR hdfs.DFSClient: Failed to close inode 19801755 > org.apache.hadoop.ipc.RemoteException(java.io.IOException): > BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not > exist or is not under Constructionnull > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) > at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) > at > org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:717) > at >
[jira] [Commented] (HDFS-8093) BP does not exist or is not under Constructionnull
[ https://issues.apache.org/jira/browse/HDFS-8093?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14698672#comment-14698672 ] Felix Borchers commented on HDFS-8093: -- grep /system/balancer.id hadoop-hdfs-namenode-devhmn02.rz.is.log.1 {code} ... 2015-08-14 00:30:03,843 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /system/balancer.id. BP-322804774-10.13.54.1-1412684451669 blk_1074256920_516292{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[[DISK]DS-4db312aa-bc23-47dc-b768-52a2d72b09d3:NORMAL:10.13.53.30:50010|RBW], ReplicaUnderConstruction[[DISK]DS-c7db1b58-8e25-435f-8af8-08b6754c021c:NORMAL:10.13.53.16:50010|RBW], ReplicaUnderConstruction[[DISK]DS-4457ae11-7684-4187-b4ad-56466d79fba2:NORMAL:10.13.53.19:50010|RBW]]} 2015-08-14 00:30:03,958 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* fsync: /system/balancer.id for DFSClient_NONMAPREDUCE_-1841368225_1 2015-08-14 00:30:03,986 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /system/balancer.id. BP-322804774-10.13.54.1-1412684451669 blk_1074256921_516293{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[[DISK]DS-8f3d8860-b977-4b7b-b681-d25c112ad1f3:NORMAL:10.13.53.14:50010|RBW], ReplicaUnderConstruction[[DISK]DS-abb5362f-6d29-478f-a678-53f09c096871:NORMAL:10.13.53.12:50010|RBW], ReplicaUnderConstruction[[DISK]DS-b02f3ebc-955e-4e11-82df-dc51278dc06f:NORMAL:10.13.53.17:50010|RBW]]} 2015-08-14 00:30:04,002 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* fsync: /system/balancer.id for DFSClient_NONMAPREDUCE_-1841368225_1 2015-08-14 00:46:44,975 WARN org.apache.hadoop.security.UserGroupInformation: PriviledgedActionException as:hdfs (auth:SIMPLE) cause:org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException: No lease on /system/balancer.id (inode 709043): File does not exist. Holder DFSClient_NONMAPREDUCE_-1841368225_1 does not have any open files. 
2015-08-14 00:46:44,975 INFO org.apache.hadoop.ipc.Server: IPC Server handler 2 on 8020, call org.apache.hadoop.hdfs.protocol.ClientProtocol.complete from 10.13.52.1:58633 Call#220 Retry#0: org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException: No lease on /system/balancer.id (inode 709043): File does not exist. Holder DFSClient_NONMAPREDUCE_-1841368225_1 does not have any open files. ... {code} The LogMessages are between the two timestamps are: {code} 2015-08-14 00:30:03,843 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /system/balancer.id. BP-322804774-10.13.54.1-1412684451669 blk_1074256920_516292{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[[DISK]DS-4db312aa-bc23-47dc-b768-52a2d72b09d3:NORMAL:10.13.53.30:50010|RBW], ReplicaUnderConstruction[[DISK]DS-c7db1b58-8e25-435f-8af8-08b6754c021c:NORMAL:10.13.53.16:50010|RBW], ReplicaUnderConstruction[[DISK]DS-4457ae11-7684-4187-b4ad-56466d79fba2:NORMAL:10.13.53.19:50010|RBW]]} 2015-08-14 00:30:03,958 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* fsync: /system/balancer.id for DFSClient_NONMAPREDUCE_-1841368225_1 2015-08-14 00:30:03,986 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /system/balancer.id. 
BP-322804774-10.13.54.1-1412684451669 blk_1074256921_516293{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[[DISK]DS-8f3d8860-b977-4b7b-b681-d25c112ad1f3:NORMAL:10.13.53.14:50010|RBW], ReplicaUnderConstruction[[DISK]DS-abb5362f-6d29-478f-a678-53f09c096871:NORMAL:10.13.53.12:50010|RBW], ReplicaUnderConstruction[[DISK]DS-b02f3ebc-955e-4e11-82df-dc51278dc06f:NORMAL:10.13.53.17:50010|RBW]]} 2015-08-14 00:30:04,000 INFO BlockStateChange: BLOCK* addBlock: block blk_1074256920_516292 on node 10.13.53.16:50010 size 134217728 does not belong to any file 2015-08-14 00:30:04,000 INFO BlockStateChange: BLOCK* InvalidateBlocks: add blk_1074256920_516292 to 10.13.53.16:50010 2015-08-14 00:30:04,000 INFO BlockStateChange: BLOCK* BlockManager: ask 10.13.53.16:50010 to delete [blk_1074256920_516292] 2015-08-14 00:30:04,000 INFO BlockStateChange: BLOCK* BlockManager: ask 10.13.53.14:50010 to delete [blk_1074256910_516282] 2015-08-14 00:30:04,002 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* fsync: /system/balancer.id for DFSClient_NONMAPREDUCE_-1841368225_1 2015-08-14 00:30:04,213 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 10.13.53.14:50010 is added to blk_1074256517_515889 size 9460 2015-08-14 00:30:04,214 INFO BlockStateChange: BLOCK* InvalidateBlocks: add blk_1074256517_515889 to 10.13.53.30:50010 2015-08-14 00:30:04,214 INFO BlockStateChange: BLOCK* chooseExcessReplicates: ([DISK]DS-4db312aa-bc23-47dc-b768-52a2d72b09d3:NORMAL:10.13.53.30:50010, blk_1074256517_515889) is added to invalidated blocks set {code} BP does not exist or is not under Constructionnull
[jira] [Commented] (HDFS-8093) BP does not exist or is not under Constructionnull
[ https://issues.apache.org/jira/browse/HDFS-8093?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14696757#comment-14696757 ] Felix Borchers commented on HDFS-8093: -- I have a very similar problem while running the balancer. {{hdfs fsck /}} returned HEALTHY and the block, causing the balancer to throw an exception is not in the HDFS anymore. {{hdfs fsck / -files -blocks | grep blk_1074256920_516292}} - returned nothing Digging in the logs of the DataNode shows, that the block was deleted on the node. (see below for log file excerpt) Digging in the logs of the NameNode shows, something like block does not belong to any file (see below for log file excerpt) It seems, there is a problem with removed/deleted blocks ?! DataNode Logs = only lines with: blk_1074256920_516292 displayed: {code} 2015-08-14 00:30:03,893 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-322804774-10.13.54.1-1412684451669:blk_1074256920_516292 src: /10.13.53.16:37605 dest: /10.13.53.19:50010 2015-08-14 00:30:07,841 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Scheduling blk_1074256920_516292 file /data/is24/hadoop/1/dfs/dataNode/current/BP-322804774-10.13.54.1-1412684451669/current/rbw/blk_1074256920 for deletion 2015-08-14 00:30:09,092 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Deleted BP-322804774-10.13.54.1-1412684451669 blk_1074256920_516292 file /data/is24/hadoop/1/dfs/dataNode/current/BP-322804774-10.13.54.1-1412684451669/current/rbw/blk_1074256920 org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: Cannot append to a non-existent replica BP-322804774-10.13.54.1-1412684451669:blk_1074256920_516292 2015-08-14 00:46:44,916 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-322804774-10.13.54.1-1412684451669:blk_1074256920_516292, type=LAST_IN_PIPELINE, downstreams=0:[] 
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: Cannot append to a non-existent replica BP-322804774-10.13.54.1-1412684451669:blk_1074256920_516292 2015-08-14 00:46:44,916 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-322804774-10.13.54.1-1412684451669:blk_1074256920_516292, type=LAST_IN_PIPELINE, downstreams=0:[] terminating {code} NameNode Logs = only lines with: blk_1074256920_516292 displayed: {code} 2015-08-14 00:30:03,843 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /system/balancer.id. BP-322804774-10.13.54.1-1412684451669 blk_1074256920_516292{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[[DISK]DS-4db312aa-bc23-47dc-b768-52a2d72b09d3:NORMAL:10.13.53.30:50010|RBW], ReplicaUnderConstruction[[DISK]DS-c7db1b58-8e25-435f-8af8-08b6754c021c:NORMAL:10.13.53.16:50010|RBW], ReplicaUnderConstruction[[DISK]DS-4457ae11-7684-4187-b4ad-56466d79fba2:NORMAL:10.13.53.19:50010|RBW]]} 2015-08-14 00:30:04,000 INFO BlockStateChange: BLOCK* addBlock: block blk_1074256920_516292 on node 10.13.53.16:50010 size 134217728 does not belong to any file 2015-08-14 00:30:04,000 INFO BlockStateChange: BLOCK* InvalidateBlocks: add blk_1074256920_516292 to 10.13.53.16:50010 2015-08-14 00:30:04,000 INFO BlockStateChange: BLOCK* BlockManager: ask 10.13.53.16:50010 to delete [blk_1074256920_516292] 2015-08-14 00:30:04,840 INFO BlockStateChange: BLOCK* addBlock: block blk_1074256920_516292 on node 10.13.53.19:50010 size 134217728 does not belong to any file 2015-08-14 00:30:04,840 INFO BlockStateChange: BLOCK* InvalidateBlocks: add blk_1074256920_516292 to 10.13.53.19:50010 2015-08-14 00:30:05,925 INFO BlockStateChange: BLOCK* addBlock: block blk_1074256920_516292 on node 10.13.53.30:50010 size 134217728 does not belong to any file 2015-08-14 00:30:05,925 INFO BlockStateChange: BLOCK* InvalidateBlocks: add blk_1074256920_516292 to 10.13.53.30:50010 2015-08-14 00:30:07,000 INFO BlockStateChange: 
BLOCK* BlockManager: ask 10.13.53.19:50010 to delete [blk_1074208004_467362, blk_1074224392_483753, blk_1074093070_352362, blk_1074240530_499900, blk_1074256920_516292, blk_1074224154_483515, blk_1074240554_499924, blk_1074240556_499926, blk_1074240561_499931, blk_1074224178_483539, blk_1074240563_499933, blk_1074207795_467153, blk_1074093108_352429, blk_1074207797_467155, blk_1073798197_57374, blk_1074224182_483543, blk_1074240569_499939, blk_1074207802_467160, blk_1074224187_483548, blk_1074224188_483549, blk_1074207805_467163, blk_1074158653_418001, blk_1074207806_467164, blk_1074224191_483552, blk_1074207809_467167, blk_1074207817_467175, blk_1074207818_467176, blk_1074207820_467178, blk_1074207822_467180, blk_1074207830_467188, blk_1074224216_483577, blk_1074224217_483578, blk_1073798237_57414, blk_1073929310_188502, blk_1074207843_467201, blk_1073847400_106577, blk_1074207852_467210,
[jira] [Commented] (HDFS-8093) BP does not exist or is not under Constructionnull
[ https://issues.apache.org/jira/browse/HDFS-8093?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14697395#comment-14697395 ] Tsz Wo Nicholas Sze commented on HDFS-8093: --- The file /system/balancer.id seems to be deleted. Could you grep /system/balancer.id from the NN log? Also, are there other log messages between 2015-08-14 00:30:03,843 and 2015-08-14 00:30:04,000? BP does not exist or is not under Constructionnull -- Key: HDFS-8093 URL: https://issues.apache.org/jira/browse/HDFS-8093 Project: Hadoop HDFS Issue Type: Bug Components: balancer & mover Affects Versions: 2.6.0 Environment: Centos 6.5 Reporter: LINTE HDFS balancer run during several hours balancing blocks between datanodes, it ended by failing with the following error. getStoredBlock function returns a null BlockInfo. java.io.IOException: Bad response ERROR for block BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 from datanode 192.168.0.18:1004 at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer$ResponseProcessor.run(DFSOutputStream.java:897) 15/04/08 05:52:51 WARN hdfs.DFSClient: Error Recovery for block BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 in pipeline 192.168.0.63:1004, 192.168.0.1:1004, 192.168.0.18:1004: bad datanode 192.168.0.18:1004 15/04/08 05:52:51 WARN hdfs.DFSClient: DataStreamer Exception org.apache.hadoop.ipc.RemoteException(java.io.IOException): BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not exist or is not under Constructionnull at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:717) at 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:931) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962) at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2039) at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2035) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:415) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2033) at org.apache.hadoop.ipc.Client.call(Client.java:1468) at org.apache.hadoop.ipc.Client.call(Client.java:1399) at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:232) at com.sun.proxy.$Proxy11.updateBlockForPipeline(Unknown Source) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:877) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187) at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102) at com.sun.proxy.$Proxy12.updateBlockForPipeline(Unknown Source) at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.setupPipelineForAppendOrRecovery(DFSOutputStream.java:1266) at 
org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.processDatanodeError(DFSOutputStream.java:1004) at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:548) 15/04/08 05:52:51 ERROR hdfs.DFSClient: Failed to close inode 19801755 org.apache.hadoop.ipc.RemoteException(java.io.IOException): BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not exist or is not under Constructionnull at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) at
[jira] [Commented] (HDFS-8093) BP does not exist or is not under Constructionnull
[ https://issues.apache.org/jira/browse/HDFS-8093?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=14629671#comment-14629671 ] Neill Lima commented on HDFS-8093: -- ./hadoop fsck / Returns what in your case? I am having this problem after changing some IP and DNS information on the boxes that run Hadoop. In my case the older IP address is shown in some BPs. BP does not exist or is not under Constructionnull -- Key: HDFS-8093 URL: https://issues.apache.org/jira/browse/HDFS-8093 Project: Hadoop HDFS Issue Type: Bug Components: balancer & mover Affects Versions: 2.6.0 Environment: Centos 6.5 Reporter: LINTE HDFS balancer run during several hours balancing blocks between datanodes, it ended by failing with the following error. getStoredBlock function returns a null BlockInfo. java.io.IOException: Bad response ERROR for block BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 from datanode 192.168.0.18:1004 at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer$ResponseProcessor.run(DFSOutputStream.java:897) 15/04/08 05:52:51 WARN hdfs.DFSClient: Error Recovery for block BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 in pipeline 192.168.0.63:1004, 192.168.0.1:1004, 192.168.0.18:1004: bad datanode 192.168.0.18:1004 15/04/08 05:52:51 WARN hdfs.DFSClient: DataStreamer Exception org.apache.hadoop.ipc.RemoteException(java.io.IOException): BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not exist or is not under Constructionnull at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:717) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:931) at 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962) at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2039) at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2035) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:415) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2033) at org.apache.hadoop.ipc.Client.call(Client.java:1468) at org.apache.hadoop.ipc.Client.call(Client.java:1399) at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:232) at com.sun.proxy.$Proxy11.updateBlockForPipeline(Unknown Source) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:877) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187) at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102) at com.sun.proxy.$Proxy12.updateBlockForPipeline(Unknown Source) at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.setupPipelineForAppendOrRecovery(DFSOutputStream.java:1266) at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.processDatanodeError(DFSOutputStream.java:1004) at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:548) 15/04/08 05:52:51 ERROR hdfs.DFSClient: Failed to close 
inode 19801755 org.apache.hadoop.ipc.RemoteException(java.io.IOException): BP-970443206-192.168.0.208-1397583979378:blk_1086729930_13046030 does not exist or is not under Constructionnull at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:6913) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.updateBlockForPipeline(FSNamesystem.java:6980) at