[ https://issues.apache.org/jira/browse/HDFS-9283?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Jagadesh Kiran N reassigned HDFS-9283:
--------------------------------------

    Assignee: Jagadesh Kiran N

> DFS client retry trace should not be displayed in CLI
> ------------------------------------------------------
>
>                 Key: HDFS-9283
>                 URL: https://issues.apache.org/jira/browse/HDFS-9283
>             Project: Hadoop HDFS
>          Issue Type: Bug
>            Reporter: Jagadesh Kiran N
>            Assignee: Jagadesh Kiran N
>
> In secure HA mode, when we run
> {code}
> ./hdfs fsck /
> {code}
> and more than two NameNodes are present, the client throws the exception below and prints the full retry stack trace on the console:
> {code}
> 15/10/22 18:39:06 INFO retry.RetryInvocationHandler: Exception while invoking getFileInfo of class ClientNamenodeProtocolTranslatorPB over /10.18.111.177:65110 after 1 fail over attempts. Trying to fail over after sleeping for 843ms.
> org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.ipc.StandbyException): Operation category READ is not supported in state standby
> 	at org.apache.hadoop.hdfs.server.namenode.ha.StandbyState.checkOperation(StandbyState.java:87)
> 	at org.apache.hadoop.hdfs.server.namenode.NameNode$NameNodeHAContext.checkOperation(NameNode.java:1875)
> 	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkOperation(FSNamesystem.java:1297)
> 	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getFileInfo(FSNamesystem.java:3745)
> 	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.getFileInfo(NameNodeRpcServer.java:1014)
> 	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.getFileInfo(ClientNamenodeProtocolServerSideTranslatorPB.java:853)
> 	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
> 	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
> 	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:973)
> 	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2088)
> 	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2084)
> 	at java.security.AccessController.doPrivileged(Native Method)
> 	at javax.security.auth.Subject.doAs(Subject.java:422)
> 	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1672)
> 	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2082)
> 	at org.apache.hadoop.ipc.Client.call(Client.java:1511)
> 	at org.apache.hadoop.ipc.Client.call(Client.java:1442)
> 	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
> 	at com.sun.proxy.$Proxy9.getFileInfo(Unknown Source)
> 	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getFileInfo(ClientNamenodeProtocolTranslatorPB.java:771)
> 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> 	at java.lang.reflect.Method.invoke(Method.java:497)
> 	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187)
> 	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
> 	at com.sun.proxy.$Proxy10.getFileInfo(Unknown Source)
> 	at org.apache.hadoop.hdfs.DFSClient.getFileInfo(DFSClient.java:1789)
> 	at org.apache.hadoop.hdfs.DistributedFileSystem$25.doCall(DistributedFileSystem.java:1387)
> 	at org.apache.hadoop.hdfs.DistributedFileSystem$25.doCall(DistributedFileSystem.java:1383)
> 	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
> 	at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:1383)
> 	at org.apache.hadoop.fs.FileSystem.resolvePath(FileSystem.java:753)
> 	at org.apache.hadoop.hdfs.tools.DFSck.getResolvedPath(DFSck.java:232)
> 	at org.apache.hadoop.hdfs.tools.DFSck.doWork(DFSck.java:311)
> 	at org.apache.hadoop.hdfs.tools.DFSck.access$000(DFSck.java:73)
> 	at org.apache.hadoop.hdfs.tools.DFSck$1.run(DFSck.java:151)
> 	at org.apache.hadoop.hdfs.tools.DFSck$1.run(DFSck.java:148)
> 	at java.security.AccessController.doPrivileged(Native Method)
> 	at javax.security.auth.Subject.doAs(Subject.java:422)
> 	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1672)
> 	at org.apache.hadoop.hdfs.tools.DFSck.run(DFSck.java:147)
> 	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
> 	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:84)
> 	at org.apache.hadoop.hdfs.tools.DFSck.main(DFSck.java:379)
> Connecting to namenode via http://10.18.106.228:50070/fsck?ugi=root&path=%2F
> FSCK started by root (auth:SIMPLE) from ip for path / at Thu Oct 22 18:39:09 CST 2015
> {code}
> This retry stack trace is internal client logging and should not be displayed in the CLI output.
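A possible interim workaround until the client is fixed (a sketch, not the patch for this issue): the trace is emitted at INFO level by org.apache.hadoop.io.retry.RetryInvocationHandler, as the log line prefix above shows, so raising that logger's threshold in the client-side log4j configuration keeps it off the console:

{code}
# Sketch of a client-side log4j.properties entry; the file location varies
# by deployment (commonly under the Hadoop configuration directory).
# Raising the retry handler's level from INFO to WARN suppresses the
# "Exception while invoking ... Trying to fail over" messages on the CLI
# while leaving genuine warnings and errors visible.
log4j.logger.org.apache.hadoop.io.retry.RetryInvocationHandler=WARN
{code}

This only reduces client-side verbosity; the failover and retry behaviour itself is unchanged.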