[ https://issues.apache.org/jira/browse/AMBARI-11704?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Sumit Mohanty updated AMBARI-11704:
-----------------------------------
    Attachment: AMBARI-11704.patch

> Mahout service check fails due to bad folder permission for /user/ambari-qa
> ---------------------------------------------------------------------------
>
>                 Key: AMBARI-11704
>                 URL: https://issues.apache.org/jira/browse/AMBARI-11704
>             Project: Ambari
>          Issue Type: Bug
>          Components: stacks
>    Affects Versions: 2.1.0
>            Reporter: Sumit Mohanty
>            Assignee: Sumit Mohanty
>            Priority: Critical
>             Fix For: 2.1.0
>
>         Attachments: AMBARI-11704.patch
>
>
> On a CentOS 6 cluster, the Mahout service check failed with the following:
> {noformat}
> Traceback (most recent call last):
>   File "/var/lib/ambari-agent/cache/common-services/MAHOUT/1.0.0.2.3/package/scripts/service_check.py", line 74, in <module>
>     MahoutServiceCheck().execute()
>   File "/usr/lib/python2.6/site-packages/resource_management/libraries/script/script.py", line 216, in execute
>     method(env)
>   File "/var/lib/ambari-agent/cache/common-services/MAHOUT/1.0.0.2.3/package/scripts/service_check.py", line 61, in service_check
>     user = params.smokeuser
>   File "/usr/lib/python2.6/site-packages/resource_management/core/base.py", line 157, in __init__
>     self.env.run()
>   File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 152, in run
>     self.run_action(resource, action)
>   File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 118, in run_action
>     provider_action()
>   File "/usr/lib/python2.6/site-packages/resource_management/core/providers/system.py", line 251, in action_run
>     raise ex
> resource_management.core.exceptions.Fail: Execution of 'mahout seqdirectory --input /user/ambari-qa/mahoutsmokeinput/sample-mahout-test.txt --output /user/ambari-qa/mahoutsmokeoutput/ --charset utf-8' returned 1. MAHOUT_LOCAL is not set; adding HADOOP_CONF_DIR to classpath.
> Running on hadoop, using /usr/hdp/current/hadoop-client/bin/hadoop and HADOOP_CONF_DIR=/usr/hdp/current/hadoop-client/conf
> MAHOUT-JOB: /usr/hdp/2.3.0.0-2216/mahout/mahout-examples-1.0.0.2.3.0.0-2216-job.jar
> WARNING: Use "yarn jar" to launch YARN applications.
> 15/06/02 20:42:48 WARN driver.MahoutDriver: No seqdirectory.props found on classpath, will use command-line arguments only
> 15/06/02 20:42:49 INFO common.AbstractJob: Command line arguments: {--charset=[utf-8], --chunkSize=[64], --endPhase=[2147483647], --fileFilterClass=[org.apache.mahout.text.PrefixAdditionFilter], --input=[/user/ambari-qa/mahoutsmokeinput/sample-mahout-test.txt], --keyPrefix=[], --method=[mapreduce], --output=[/user/ambari-qa/mahoutsmokeoutput/], --startPhase=[0], --tempDir=[temp]}
> 15/06/02 20:42:52 INFO impl.TimelineClientImpl: Timeline service address: http://ip-172-31-39-39.ec2.internal:8188/ws/v1/timeline/
> 15/06/02 20:42:52 INFO client.RMProxy: Connecting to ResourceManager at ip-172-31-39-40.ec2.internal/172.31.39.40:8050
> Exception in thread "main" org.apache.hadoop.security.AccessControlException: Permission denied: user=ambari-qa, access=WRITE, inode="/user/ambari-qa/.staging":hdfs:hdfs:drwxr-xr-x
>       at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:319)
>       at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:292)
>       at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:213)
>       at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:190)
>       at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1698)
>       at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1682)
>       at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkAncestorAccess(FSDirectory.java:1665)
>       at org.apache.hadoop.hdfs.server.namenode.FSDirMkdirOp.mkdirs(FSDirMkdirOp.java:71)
>       at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:3909)
>       at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:982)
>       at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:622)
>       at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
>       at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
>       at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:969)
>       at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2081)
>       at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2077)
>       at java.security.AccessController.doPrivileged(Native Method)
>       at javax.security.auth.Subject.doAs(Subject.java:415)
>       at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
>       at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2075)
>       at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
>       at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
>       at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
>       at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
>       at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
>       at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:73)
>       at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:3009)
>       at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:2977)
>       at org.apache.hadoop.hdfs.DistributedFileSystem$21.doCall(DistributedFileSystem.java:1047)
>       at org.apache.hadoop.hdfs.DistributedFileSystem$21.doCall(DistributedFileSystem.java:1043)
>       at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
>       at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirsInternal(DistributedFileSystem.java:1043)
>       at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:1036)
>       at org.apache.hadoop.mapreduce.JobSubmissionFiles.getStagingDir(JobSubmissionFiles.java:133)
>       at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:144)
>       at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
>       at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
>       at java.security.AccessController.doPrivileged(Native Method)
>       at javax.security.auth.Subject.doAs(Subject.java:415)
>       at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
>       at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
>       at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308)
>       at org.apache.mahout.text.SequenceFilesFromDirectory.runMapReduce(SequenceFilesFromDirectory.java:183)
>       at org.apache.mahout.text.SequenceFilesFromDirectory.run(SequenceFilesFromDirectory.java:91)
>       at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
>       at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:84)
>       at org.apache.mahout.text.SequenceFilesFromDirectory.main(SequenceFilesFromDirectory.java:65)
>       at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>       at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
>       at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>       at java.lang.reflect.Method.invoke(Method.java:606)
>       at org.apache.hadoop.util.ProgramDriver$ProgramDescription.invoke(ProgramDriver.java:71)
>       at org.apache.hadoop.util.ProgramDriver.run(ProgramDriver.java:144)
>       at org.apache.hadoop.util.ProgramDriver.driver(ProgramDriver.java:152)
>       at org.apache.mahout.driver.MahoutDriver.main(MahoutDriver.java:195)
>       at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>       at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
>       at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>       at java.lang.reflect.Method.invoke(Method.java:606)
>       at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
>       at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
> Caused by: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.AccessControlException): Permission denied: user=ambari-qa, access=WRITE, inode="/user/ambari-qa/.staging":hdfs:hdfs:drwxr-xr-x
>       at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:319)
>       at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:292)
>       at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:213)
>       at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:190)
>       at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1698)
>       at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1682)
>       at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkAncestorAccess(FSDirectory.java:1665)
>       at org.apache.hadoop.hdfs.server.namenode.FSDirMkdirOp.mkdirs(FSDirMkdirOp.java:71)
>       at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:3909)
>       at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:982)
>       at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:622)
>       at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
>       at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
>       at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:969)
>       at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2081)
>       at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2077)
>       at java.security.AccessController.doPrivileged(Native Method)
>       at javax.security.auth.Subject.doAs(Subject.java:415)
>       at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
>       at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2075)
>       at org.apache.hadoop.ipc.Client.call(Client.java:1427)
>       at org.apache.hadoop.ipc.Client.call(Client.java:1358)
>       at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
>       at com.sun.proxy.$Proxy10.mkdirs(Unknown Source)
>       at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.mkdirs(ClientNamenodeProtocolTranslatorPB.java:558)
>       at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>       at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
>       at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>       at java.lang.reflect.Method.invoke(Method.java:606)
>       at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187)
>       at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
>       at com.sun.proxy.$Proxy11.mkdirs(Unknown Source)
>       at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:3007)
>       ... 34 more
> {noformat}
> The root cause appears to be that /user/ambari-qa was created owned by hdfs:hdfs with mode drwxr-xr-x, so the ambari-qa user has no WRITE access to its own home directory and job submission cannot create /user/ambari-qa/.staging:
> {noformat}
> hdfs dfs -ls /user
> Found 6 items
> drwx------   - accumulo hdfs          0 2015-06-02 20:40 /user/accumulo
> drwxr-xr-x   - hdfs     hdfs          0 2015-06-02 20:41 /user/ambari-qa
> drwxr-xr-x   - hcat     hdfs          0 2015-06-02 20:41 /user/hcat
> drwx------   - hive     hdfs          0 2015-06-02 20:41 /user/hive
> drwxrwxr-x   - oozie    hdfs          0 2015-06-02 20:43 /user/oozie
> drwxrwxr-x   - spark    hdfs          0 2015-06-02 20:37 /user/spark
> {noformat}
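> For reference, a manual workaround (independent of the attached patch) would be to have the HDFS superuser hand ownership of the home directory back to the smoke test user:
> {noformat}
> # workaround: give the smoke user ownership of its home directory
> # (group hdfs kept as-is; adjust if your cluster uses a different group)
> sudo -u hdfs hdfs dfs -chown ambari-qa:hdfs /user/ambari-qa
> # verify the new owner before re-running the service check
> hdfs dfs -ls /user
> {noformat}
> With ambari-qa owning the directory, job submission can create /user/ambari-qa/.staging and the service check should be able to proceed.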



