[jira] [Updated] (HBASE-20869) Endpoint-based Export use incorrect user to write to destination
[ https://issues.apache.org/jira/browse/HBASE-20869?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Wei-Chiu Chuang updated HBASE-20869:
------------------------------------
    Attachment: HBASE-20869.master.001.patch

> Endpoint-based Export use incorrect user to write to destination
> -----------------------------------------------------------------
>
>                 Key: HBASE-20869
>                 URL: https://issues.apache.org/jira/browse/HBASE-20869
>             Project: HBase
>          Issue Type: Bug
>          Components: Coprocessors
>    Affects Versions: 2.0.0
>         Environment: Hadoop 3.0.0 + HBase 2.0.0, Kerberos.
>            Reporter: Wei-Chiu Chuang
>            Assignee: Wei-Chiu Chuang
>            Priority: Major
>         Attachments: HBASE-20869.master.001.patch
>
>
> HBASE-15806 implemented an endpoint-based export. It obtains the caller's HDFS
> delegation token, and the RegionServer is supposed to write out the exported
> files as the caller.
> Everything works fine if you run the export as the hbase user. However, once
> you run the export as a different user, it fails.
> To reproduce:
> Add the coprocessor class org.apache.hadoop.hbase.coprocessor.Export to the
> configuration key hbase.coprocessor.region.classes.
> Create a table t1 and assign permissions to a user foo:
> {noformat}
> hbase(main):004:0> user_permission 't1'
> User Namespace,Table,Family,Qualifier:Permission
> hbase default,t1,,: [Permission: actions=READ,WRITE,EXEC,CREATE,ADMIN]
> foo default,t1,,: [Permission: actions=READ,WRITE,EXEC,CREATE,ADMIN]{noformat}
> As user foo, execute the following commands:
> {noformat}
> $ hdfs dfs -mkdir /tmp/export_hbase2
> $ hbase org.apache.hadoop.hbase.coprocessor.Export t1 /tmp/export_hbase2/t2/
>
> 18/07/10 14:03:59 INFO client.RpcRetryingCallerImpl: Call exception, tries=6,
> retries=6, started=4457 ms ago, cancelled=false,
> msg=org.apache.hadoop.security.AccessControlException: Permission denied:
> user=hbase, access=WRITE,
> inode="/tmp/export_hbase2/t2":foo:supergroup:drwxr-xr-x
> 	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:400)
> 	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:256)
> 	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:194)
> 	at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1846)
> 	at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1830)
> 	at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkAncestorAccess(FSDirectory.java:1789)
> 	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.resolvePathForStartFile(FSDirWriteFileOp.java:316)
> 	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2411)
> 	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2343)
> 	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:764)
> 	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:451)
> 	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
> 	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:523)
> 	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:991)
> 	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:869)
> 	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:815)
> 	at java.security.AccessController.doPrivileged(Native Method)
> 	at javax.security.auth.Subject.doAs(Subject.java:422)
> 	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1685)
> 	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2675)
> 	at sun.reflect.GeneratedConstructorAccessor25.newInstance(Unknown Source)
> 	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
> 	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
> 	at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121)
> 	at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88)
> 	at org.apache.hadoop.hdfs.DFSOutputStream.newStreamForCreate(DFSOutputStream.java:278)
> 	at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1195)
> 	at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1174)
> 	at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1112)
> 	at org.apache.hadoop.hdfs.DistributedFileSystem$8.doCall(DistributedFileSystem.java:462)
> 	at org.apache.hadoop.hdfs.DistributedFileSystem$8.doCall(DistributedFileSystem.java:459)
> 	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
> {noformat}
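The coprocessor registration in the reproduction steps is the standard hbase-site.xml entry on the RegionServers. Assuming no other region coprocessors are configured, it looks like this (the value becomes a comma-separated list when several coprocessors are loaded):

{noformat}
<property>
  <name>hbase.coprocessor.region.classes</name>
  <value>org.apache.hadoop.hbase.coprocessor.Export</value>
</property>
{noformat}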
[jira] [Updated] (HBASE-20869) Endpoint-based Export use incorrect user to write to destination
[ https://issues.apache.org/jira/browse/HBASE-20869?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Wei-Chiu Chuang updated HBASE-20869:
------------------------------------
    Status: Patch Available  (was: Open)
[jira] [Updated] (HBASE-20869) Endpoint-based Export use incorrect user to write to destination
[ https://issues.apache.org/jira/browse/HBASE-20869?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Wei-Chiu Chuang updated HBASE-20869:
------------------------------------
    Attachment: HBASE-20869.master.002.patch
[jira] [Updated] (HBASE-20869) Endpoint-based Export use incorrect user to write to destination
[ https://issues.apache.org/jira/browse/HBASE-20869?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Chia-Ping Tsai updated HBASE-20869:
-----------------------------------
    Fix Version/s: 2.0.2
                   2.1.0
                   3.0.0
[jira] [Updated] (HBASE-20869) Endpoint-based Export use incorrect user to write to destination
[ https://issues.apache.org/jira/browse/HBASE-20869?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Chia-Ping Tsai updated HBASE-20869:
-----------------------------------
    Fix Version/s:     (was: 2.1.0)
                   2.1.1
[jira] [Updated] (HBASE-20869) Endpoint-based Export use incorrect user to write to destination
[ https://issues.apache.org/jira/browse/HBASE-20869?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Chia-Ping Tsai updated HBASE-20869:
-----------------------------------
    Fix Version/s: 2.2.0
[jira] [Updated] (HBASE-20869) Endpoint-based Export use incorrect user to write to destination
[ https://issues.apache.org/jira/browse/HBASE-20869?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Chia-Ping Tsai updated HBASE-20869:
-----------------------------------
      Resolution: Fixed
    Hadoop Flags: Reviewed
          Status: Resolved  (was: Patch Available)

[~jojochuang] Thanks for the contribution!
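The attached patches are not reproduced in this digest, but the behavior the description calls for, writing the exported files as the caller rather than as the hbase service user, follows the standard Hadoop impersonation pattern: perform the HDFS writes inside a doAs() for a UGI that carries the caller's delegation token. Below is a minimal sketch of that pattern, not the actual HBASE-20869 patch; callerName, hdfsDelegationToken, and exportDir are illustrative parameters standing in for what the Export endpoint receives from the client.

{noformat}
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class ExportAsCallerSketch {

  // Write into exportDir as the requesting user instead of the
  // RegionServer's own (hbase) login user.
  public static void writeAsCaller(Configuration conf, String callerName,
      Token<? extends TokenIdentifier> hdfsDelegationToken, Path exportDir)
      throws Exception {
    // Build a UGI for the caller and attach the HDFS delegation token
    // shipped with the export request.
    UserGroupInformation callerUgi =
        UserGroupInformation.createRemoteUser(callerName);
    callerUgi.addToken(hdfsDelegationToken);

    // Everything inside doAs() authenticates to the NameNode as the caller,
    // so the permission check against the caller's directory succeeds and
    // the created files are owned by the caller.
    callerUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
      FileSystem fs = FileSystem.newInstance(conf);
      try {
        fs.create(new Path(exportDir, "part-0")).close();
      } finally {
        fs.close();
      }
      return null;
    });
  }
}
{noformat}

Creating the FileSystem instance inside doAs() (rather than reusing the RegionServer's cached instance, which is bound to the hbase login user) is what ties the NameNode RPCs to the caller's credentials.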