[ 
https://issues.apache.org/jira/browse/AMBARI-12579?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Sam Mingolelli updated AMBARI-12579:
------------------------------------
    Description: 
Discovered while trying to enable HBase replication to another cluster; only 
tested with Kerberos enabled.

Unable to run a MapReduce job because the hbase user lacks WRITE access on the 
HDFS /user directory. A workaround exists: change the permissions on /user to 
777, or add the hbase user to the hdfs group and change the permissions on 
/user to 775 (see the sketch after the stack trace below).

{code}
hbase org.apache.hadoop.hbase.mapreduce.CopyTable 
--peer.adr=xxx.td.local,xxx.td.local,xxx.td.local:2181:/hbase-secure temp

2015-07-28 10:57:58,721 INFO  [main] mapreduce.TableOutputFormat: Created table 
instance for temp
Exception in thread "main" org.apache.hadoop.security.AccessControlException: 
Permission denied: user=hbase, access=WRITE, inode="/user":hdfs:hdfs:drwxr-xr-x
        at 
org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkFsPermission(FSPermissionChecker.java:271)
        at 
org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:257)
        at 
org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:238)
        at 
org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:179)
        at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6812)
        at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6794)
        at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkAncestorAccess(FSNamesystem.java:6746)
        at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInternal(FSNamesystem.java:4530)
        at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInt(FSNamesystem.java:4500)
        at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:4473)
        at 
org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:865)
        at 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:614)
        at 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at 
org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2039)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2035)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:415)
        at 
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2033)
{code}
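
The workaround can be applied with the commands sketched below. This is a 
minimal sketch, not a vetted fix: it assumes the cluster is Kerberized (so you 
must first kinit with the hdfs superuser keytab), that the group owning /user 
is named hdfs as shown in the inode listing above, and that loosening /user is 
acceptable in your environment.

{code}
# Authenticate as the HDFS superuser first (assumption: a Kerberized cluster
# with an hdfs headless keytab available), then pick one option.

# Option 1: open /user to everyone (the 777 workaround):
hdfs dfs -chmod 777 /user

# Option 2: make /user group-writable and put hbase in the hdfs group
# (the 775 workaround); the usermod must run on every job-submitting node:
usermod -a -G hdfs hbase
hdfs dfs -chmod 775 /user

# Narrower alternative (an assumption, not part of this report): pre-create a
# home directory for hbase so its job files can be written without loosening
# /user at all:
hdfs dfs -mkdir -p /user/hbase
hdfs dfs -chown hbase:hdfs /user/hbase
{code}

The narrower alternative targets the actual failure: the AccessControlException 
above is raised while mkdirs tries to create a directory under /user for the 
hbase user's job, so a pre-created, hbase-owned /user/hbase satisfies the same 
check without widening permissions on /user itself.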

  was:
Discovered while trying to enable HBase replication to another cluster.  Only 
tested with Kerberos enabled.

{code}
Unable to run map reduce job, because hadoop FS has incorrect permissions.   
Workaround exists if you change permissions to /user to 777, or add hbase user 
to HDFS group and change permissions to 775.

hbase org.apache.hadoop.hbase.mapreduce.CopyTable 
--peer.adr=xxx.td.local,xxx.td.local,xxx.td.local:2181:/hbase-secure temp

2015-07-28 10:57:58,721 INFO  [main] mapreduce.TableOutputFormat: Created table 
instance for temp
Exception in thread "main" org.apache.hadoop.security.AccessControlException: 
Permission denied: user=hbase, access=WRITE, inode="/user":hdfs:hdfs:drwxr-xr-x
        at 
org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkFsPermission(FSPermissionChecker.java:271)
        at 
org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:257)
        at 
org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:238)
        at 
org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:179)
        at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6812)
        at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6794)
        at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkAncestorAccess(FSNamesystem.java:6746)
        at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInternal(FSNamesystem.java:4530)
        at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInt(FSNamesystem.java:4500)
        at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:4473)
        at 
org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:865)
        at 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:614)
        at 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at 
org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2039)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2035)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:415)
        at 
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2033)
{code}


> HBase user cannot run MapReduce job because it does not have WRITE access on 
> inode /user
> --------------------------------------------------------------------------------------
>
>                 Key: AMBARI-12579
>                 URL: https://issues.apache.org/jira/browse/AMBARI-12579
>             Project: Ambari
>          Issue Type: Bug
>          Components: security
>    Affects Versions: 2.0.1
>            Reporter: Colin Cogan
>            Priority: Minor
>
> Discovered while trying to enable HBase replication to another cluster; only 
> tested with Kerberos enabled.
> Unable to run a MapReduce job because the hbase user lacks WRITE access on 
> the HDFS /user directory. A workaround exists: change the permissions on 
> /user to 777, or add the hbase user to the hdfs group and change the 
> permissions on /user to 775.
> {code}
> hbase org.apache.hadoop.hbase.mapreduce.CopyTable 
> --peer.adr=xxx.td.local,xxx.td.local,xxx.td.local:2181:/hbase-secure temp
> 2015-07-28 10:57:58,721 INFO  [main] mapreduce.TableOutputFormat: Created 
> table instance for temp
> Exception in thread "main" org.apache.hadoop.security.AccessControlException: 
> Permission denied: user=hbase, access=WRITE, 
> inode="/user":hdfs:hdfs:drwxr-xr-x
>         at 
> org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkFsPermission(FSPermissionChecker.java:271)
>         at 
> org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:257)
>         at 
> org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:238)
>         at 
> org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:179)
>         at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6812)
>         at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6794)
>         at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkAncestorAccess(FSNamesystem.java:6746)
>         at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInternal(FSNamesystem.java:4530)
>         at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInt(FSNamesystem.java:4500)
>         at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:4473)
>         at 
> org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:865)
>         at 
> org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:614)
>         at 
> org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
>         at 
> org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619)
>         at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962)
>         at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2039)
>         at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2035)
>         at java.security.AccessController.doPrivileged(Native Method)
>         at javax.security.auth.Subject.doAs(Subject.java:415)
>         at 
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628)
>         at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2033)
> {code}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)