This is an automated email from the ASF dual-hosted git repository.

abhay pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ranger.git
The following commit(s) were added to refs/heads/master by this push:
     new 3e383969d  RANGER-4817: Optimize Ranger HDFS Authorization by combining multiple authorization calls
3e383969d is described below

commit 3e383969d759897112d114f6c03f5bd597c9b1f4
Author: Abhay Kulkarni <akulka...@cloudera.com>
AuthorDate: Mon Jun 10 16:13:48 2024 -0700

    RANGER-4817: Optimize Ranger HDFS Authorization by combining multiple authorization calls
---
 .../ranger/plugin/service/RangerBasePlugin.java    |   2 +-
 .../plugin/util/RangerAccessRequestUtil.java       |  10 +
 .../authorization/hadoop/RangerHdfsAuthorizer.java | 501 ++++++++++++++++++---
 .../authorization/hadoop/RangerHdfsAuthorizer.java |  72 +--
 4 files changed, 453 insertions(+), 132 deletions(-)

diff --git a/agents-common/src/main/java/org/apache/ranger/plugin/service/RangerBasePlugin.java b/agents-common/src/main/java/org/apache/ranger/plugin/service/RangerBasePlugin.java
index 6a614bf2d..8db08c598 100644
--- a/agents-common/src/main/java/org/apache/ranger/plugin/service/RangerBasePlugin.java
+++ b/agents-common/src/main/java/org/apache/ranger/plugin/service/RangerBasePlugin.java
@@ -533,7 +533,7 @@ public class RangerBasePlugin {
 			ret = policyEngine.evaluatePolicies(request, RangerPolicy.POLICY_TYPE_ACCESS, null);
 		}
 
-		if (ret != null) {
+		if (ret != null && !RangerAccessRequestUtil.getIsSkipChainedPlugins(request.getContext())) {
 			for (RangerChainedPlugin chainedPlugin : chainedPlugins) {
 				if (LOG.isDebugEnabled()) {
 					LOG.debug("BasePlugin.isAccessAllowed result=[" + ret + "]");
diff --git a/agents-common/src/main/java/org/apache/ranger/plugin/util/RangerAccessRequestUtil.java b/agents-common/src/main/java/org/apache/ranger/plugin/util/RangerAccessRequestUtil.java
index a56ecb268..df0352ca9 100644
--- a/agents-common/src/main/java/org/apache/ranger/plugin/util/RangerAccessRequestUtil.java
+++ b/agents-common/src/main/java/org/apache/ranger/plugin/util/RangerAccessRequestUtil.java
@@ -53,6 +53,7 @@ public class RangerAccessRequestUtil {
 	public static final String KEY_CONTEXT_IS_REQUEST_PREPROCESSED = "ISREQUESTPREPROCESSED";
 	public static final String KEY_CONTEXT_RESOURCE_ZONE_NAMES     = "RESOURCE_ZONE_NAMES";
 	public static final String KEY_CONTEXT_ACCESS_TYPE_RESULTS     = "_ACCESS_TYPE_RESULTS";
+	public static final String KEY_CONTEXT_IS_SKIP_CHAINED_PLUGINS = "_IS_SKIP_CHAINED_PLUGINS";
 
 	public static void setRequestTagsInContext(Map<String, Object> context, Set<RangerTagForEval> tags) {
 		if(CollectionUtils.isEmpty(tags)) {
@@ -361,4 +362,13 @@ public class RangerAccessRequestUtil {
 			results.putIfAbsent(accessType, result);
 		}
 	}
+
+	public static void setIsSkipChainedPlugins(Map<String, Object> context, Boolean value) {
+		context.put(KEY_CONTEXT_IS_SKIP_CHAINED_PLUGINS, value);
+	}
+
+	public static boolean getIsSkipChainedPlugins(Map<String, Object> context) {
+		Boolean value = (Boolean)context.get(KEY_CONTEXT_IS_SKIP_CHAINED_PLUGINS);
+		return value != null && value;
+	}
 }
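The two RangerAccessRequestUtil helpers and the RangerBasePlugin change above form one mechanism: a per-request flag, carried in the access-request context, that lets a caller suppress chained-plugin evaluation once a Ranger policy has already allowed the request. A minimal sketch of the round trip (the surrounding request setup is illustrative, not part of this commit):

    // marking side -- done by the HDFS authorizer after the first ALLOW result:
    RangerAccessRequestUtil.setIsSkipChainedPlugins(request.getContext(), Boolean.TRUE);

    // checking side -- RangerBasePlugin.isAccessAllowed(), as changed above, fans out
    // to chained plugins only when the flag is absent or false:
    if (ret != null && !RangerAccessRequestUtil.getIsSkipChainedPlugins(request.getContext())) {
        for (RangerChainedPlugin chainedPlugin : chainedPlugins) {
            // evaluate chainedPlugin ...
        }
    }

Because getIsSkipChainedPlugins() treats a missing key as false, requests that never set the flag keep the existing behavior.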
diff --git a/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java b/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java
index b11ee62a3..c892bced3 100644
--- a/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java
+++ b/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java
@@ -29,9 +29,16 @@ import static org.apache.ranger.authorization.hadoop.constants.RangerHadoopConst
 import static org.apache.ranger.authorization.hadoop.constants.RangerHadoopConstants.ALL_PERM;
 import static org.apache.ranger.authorization.hadoop.constants.RangerHadoopConstants.ACCESS_TYPE_MONITOR_HEALTH;
+
 import java.net.InetAddress;
 import java.security.SecureRandom;
-import java.util.*;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.Stack;
+import java.util.Objects;
 
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.ArrayUtils;
@@ -72,20 +79,38 @@ import com.google.common.collect.Sets;
 import org.apache.ranger.plugin.util.RangerAccessRequestUtil;
 
 public class RangerHdfsAuthorizer extends INodeAttributeProvider {
-	public static final String KEY_FILENAME = "FILENAME";
-	public static final String KEY_BASE_FILENAME = "BASE_FILENAME";
-	public static final String DEFAULT_FILENAME_EXTENSION_SEPARATOR = ".";
+	private static final Logger LOG = LoggerFactory.getLogger(RangerHdfsAuthorizer.class);
+	private static final Logger PERF_HDFSAUTH_REQUEST_LOG = RangerPerfTracer.getPerfLogger("hdfsauth.request");
 
-	public static final String KEY_RESOURCE_PATH = "path";
+	public static final String KEY_FILENAME                             = "FILENAME";
+	public static final String KEY_BASE_FILENAME                        = "BASE_FILENAME";
+	public static final String DEFAULT_FILENAME_EXTENSION_SEPARATOR     = ".";
+	public static final String KEY_RESOURCE_PATH                        = "path";
+	public static final String RANGER_FILENAME_EXTENSION_SEPARATOR_PROP = "ranger.plugin.hdfs.filename.extension.separator";
+	public static final String OPERATION_NAME_CREATE       = "create";
+	public static final String OPERATION_NAME_DELETE       = "delete";
+	public static final String OPERATION_NAME_RENAME       = "rename";
+	public static final String OPERATION_NAME_LISTSTATUS   = "listStatus";
+	public static final String OPERATION_NAME_MKDIRS       = "mkdirs";
+	public static final String OPERATION_NAME_GETEZFORPATH = "getEZForPath";
 
-	public static final String RANGER_FILENAME_EXTENSION_SEPARATOR_PROP = "ranger.plugin.hdfs.filename.extension.separator";
+	private static final Set<String> OPTIMIZED_OPERATIONS = new HashSet<String>() {{
+		add(OPERATION_NAME_CREATE);
+		add(OPERATION_NAME_DELETE);
+		add(OPERATION_NAME_RENAME);
+		add(OPERATION_NAME_LISTSTATUS);
+		add(OPERATION_NAME_MKDIRS);
+		add(OPERATION_NAME_GETEZFORPATH);
+	}};
+
+	private RangerHdfsPlugin                 rangerPlugin               = null;
+	private final Map<FsAction, Set<String>> access2ActionListMapper    = new HashMap<FsAction, Set<String>>();
+	private final Path                       addlConfigFile;
+	private boolean                          AUTHZ_OPTIMIZATION_ENABLED = true;
+
+	private final OptimizedAuthzContext OPT_BYPASS_AUTHZ = new OptimizedAuthzContext("", FsAction.NONE, FsAction.NONE, FsAction.NONE, AuthzStatus.ALLOW);
 
-	private static final Logger LOG = LoggerFactory.getLogger(RangerHdfsAuthorizer.class);
-	private static final Logger PERF_HDFSAUTH_REQUEST_LOG = RangerPerfTracer.getPerfLogger("hdfsauth.request");
-	private RangerHdfsPlugin rangerPlugin = null;
-	private Map<FsAction, Set<String>> access2ActionListMapper = new HashMap<FsAction, Set<String>>();
-	private final Path addlConfigFile;
 
 	public RangerHdfsAuthorizer() {
 		this(null);
@@ -129,6 +154,11 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 
 		rangerPlugin = plugin;
 
+		AUTHZ_OPTIMIZATION_ENABLED = plugin.getConfig().getBoolean("ranger.hdfs.authz.enable.optimization", false);
+
+		LOG.info("AUTHZ_OPTIMIZATION_ENABLED:[" + AUTHZ_OPTIMIZATION_ENABLED + "]");
+
+
 		if(LOG.isDebugEnabled()) {
 			LOG.debug("<== RangerHdfsAuthorizer.start()");
 		}
@@ -153,32 +183,12 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 
 	@Override
 	public INodeAttributes getAttributes(String fullPath, INodeAttributes inode) {
-		if(LOG.isDebugEnabled()) {
-			LOG.debug("==> RangerHdfsAuthorizer.getAttributes(" + fullPath + ")");
-		}
-
-		INodeAttributes ret = inode; // return default attributes
-
-		if(LOG.isDebugEnabled()) {
-			LOG.debug("<== RangerHdfsAuthorizer.getAttributes(" + fullPath + "): " + ret);
-		}
-
-		return ret;
+		return inode; // return default attributes
 	}
 
 	@Override
 	public INodeAttributes getAttributes(String[] pathElements, INodeAttributes inode) {
-		if(LOG.isDebugEnabled()) {
-			LOG.debug("==> RangerHdfsAuthorizer.getAttributes(pathElementsCount=" + (pathElements == null ? 0 : pathElements.length) + ")");
-		}
-
-		INodeAttributes ret = inode; // return default attributes
-
-		if(LOG.isDebugEnabled()) {
-			LOG.debug("<== RangerHdfsAuthorizer.getAttributes(pathElementsCount=" + (pathElements == null ? 0 : pathElements.length) + "): " + ret);
-		}
-
-		return ret;
+		return inode;
 	}
 
 	@Override
@@ -204,7 +214,8 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 	private enum AuthzStatus { ALLOW, DENY, NOT_DETERMINED }
 
 	class RangerAccessControlEnforcer implements AccessControlEnforcer {
-		private INodeAttributeProvider.AccessControlEnforcer defaultEnforcer = null;
+		private final AccessControlEnforcer              defaultEnforcer;
+		private       Map<String, OptimizedAuthzContext> CACHE = null;
 
 		public RangerAccessControlEnforcer(AccessControlEnforcer defaultEnforcer) {
 			if(LOG.isDebugEnabled()) {
@@ -264,7 +275,7 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 			AuthzContext context = new AuthzContext(rangerPlugin, ugi, operationName, access == null && parentAccess == null && ancestorAccess == null && subAccess == null);
 
 			if(LOG.isDebugEnabled()) {
-				LOG.debug("==> RangerAccessControlEnforcer.checkPermission("
+				LOG.debug("==> RangerAccessControlEnforcer.checkRangerPermission("
 						+ "fsOwner=" + fsOwner + "; superGroup=" + superGroup + ", inodesCount=" + (inodes != null ? inodes.length : 0)
 						+ ", snapshotId=" + snapshotId + ", user=" + context.user + ", provided-path=" + path + ", ancestorIndex=" + ancestorIndex
 						+ ", doCheckOwner="+ doCheckOwner + ", ancestorAccess=" + ancestorAccess + ", parentAccess=" + parentAccess
@@ -272,10 +283,15 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 						+ ", callerContext=" + callerContext +")");
 			}
 
-			RangerPerfTracer perf = null;
+			if (LOG.isDebugEnabled()) {
+				LOG.info("operationName={}, path={}, user={}, ancestorIndex={}, ancestorAccess={}, parentAccess={}, access={}, subAccess={}", context.operationName, path, context.user, ancestorIndex, ancestorAccess, parentAccess, access, subAccess);
+			}
+
+			OptimizedAuthzContext optAuthzContext = null;
+			RangerPerfTracer      perf            = null;
 
 			if(RangerPerfTracer.isPerfTraceEnabled(PERF_HDFSAUTH_REQUEST_LOG)) {
-				perf = RangerPerfTracer.getPerfTracer(PERF_HDFSAUTH_REQUEST_LOG, "RangerHdfsAuthorizer.checkPermission(provided-path=" + path + ")");
+				perf = RangerPerfTracer.getPerfTracer(PERF_HDFSAUTH_REQUEST_LOG, "RangerHdfsAuthorizer.checkRangerPermission(provided-path=" + path + ")");
 			}
 
 			try {
@@ -289,9 +305,9 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 				if (context.plugin != null && !ArrayUtils.isEmpty(inodes)) {
 					int sz = inodeAttrs.length;
 
-					if (LOG.isDebugEnabled()) {
-						LOG.debug("Size of INodeAttrs array:[" + sz + "]");
-						LOG.debug("Size of INodes array:[" + inodes.length + "]");
+					if (LOG.isTraceEnabled()) {
+						LOG.trace("Size of INodeAttrs array:[" + sz + "]");
+						LOG.trace("Size of INodes array:[" + inodes.length + "]");
 					}
 
 					byte[][] components = new byte[sz][];
@@ -304,9 +320,9 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 						}
 					}
 
 					if (i != sz) {
-						if (LOG.isDebugEnabled()) {
-							LOG.debug("Input INodeAttributes array contains null at position " + i);
-							LOG.debug("Will use only first [" + i + "] components");
+						if (LOG.isTraceEnabled()) {
+							LOG.trace("Input INodeAttributes array contains null at position " + i);
+							LOG.trace("Will use only first [" + i + "] components");
 						}
 					}
@@ -314,8 +330,8 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 
 						doNotGenerateAuditRecord = true;
 
-						if (LOG.isDebugEnabled()) {
-							LOG.debug("Using the only inode in the array to figure out path to resource. No audit record will be generated for this authorization request");
+						if (LOG.isTraceEnabled()) {
+							LOG.trace("Using the only inode in the array to figure out path to resource. No audit record will be generated for this authorization request");
 						}
 
 						resourcePath = inodes[0].getFullPathName();
@@ -324,12 +340,12 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 
 							useDefaultAuthorizerOnly = true;
 
-							if (LOG.isDebugEnabled()) {
-								LOG.debug("path:[" + resourcePath + "] is for a snapshot, id=[" + snapshotId +"], default Authorizer will be used to authorize this request");
+							if (LOG.isTraceEnabled()) {
+								LOG.trace("path:[" + resourcePath + "] is for a snapshot, id=[" + snapshotId +"], default Authorizer will be used to authorize this request");
 							}
 						} else {
-							if (LOG.isDebugEnabled()) {
-								LOG.debug("path:[" + resourcePath + "] is not for a snapshot, id=[" + snapshotId +"]. It will be used to authorize this request");
+							if (LOG.isTraceEnabled()) {
+								LOG.trace("path:[" + resourcePath + "] is not for a snapshot, id=[" + snapshotId +"]. It will be used to authorize this request");
 							}
 						}
 					} else {
@@ -337,14 +353,14 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 
 						if (snapshotId != Snapshot.CURRENT_STATE_ID) {
 							resourcePath = DFSUtil.byteArray2PathString(pathByNameArr);
 
-							if (LOG.isDebugEnabled()) {
-								LOG.debug("pathByNameArr array is used to figure out path to resource, resourcePath:[" + resourcePath +"]");
+							if (LOG.isTraceEnabled()) {
+								LOG.trace("pathByNameArr array is used to figure out path to resource, resourcePath:[" + resourcePath +"]");
 							}
 						} else {
 							resourcePath = DFSUtil.byteArray2PathString(components, 0, i);
 
-							if (LOG.isDebugEnabled()) {
-								LOG.debug("INodeAttributes array is used to figure out path to resource, resourcePath:[" + resourcePath +"]");
+							if (LOG.isTraceEnabled()) {
+								LOG.trace("INodeAttributes array is used to figure out path to resource, resourcePath:[" + resourcePath +"]");
 							}
 						}
 					}
@@ -355,12 +371,59 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 
 				for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null; ancestorIndex--);
 
-				authzStatus = useDefaultAuthorizerOnly ? AuthzStatus.NOT_DETERMINED : AuthzStatus.ALLOW;
-
 				ancestor = inodes.length > ancestorIndex && ancestorIndex >= 0 ? inodes[ancestorIndex] : null;
 				parent   = inodes.length > 1 ? inodes[inodes.length - 2] : null;
 				inode    = inodes[inodes.length - 1]; // could be null while creating a new file
 
+				/*
+				Check if optimization is done
+				 */
+				optAuthzContext = (new OperationOptimizer(operationName, resourcePath, ancestorAccess, parentAccess, access, subAccess, components, inodeAttrs, ancestorIndex, ancestor, parent, inode)).optimize();
+
+				if (optAuthzContext == OPT_BYPASS_AUTHZ) {
+					authzStatus = AuthzStatus.ALLOW;
+
+					return;
+				} else if (optAuthzContext != null && optAuthzContext.authzStatus != null) {
+					authzStatus = optAuthzContext.authzStatus;
+
+					if (LOG.isDebugEnabled()) {
+						LOG.debug("OperationOptimizer.optimize() returned " + authzStatus + ", operationName=" + operationName + " has been pre-computed. Returning without any access evaluation!");
+					}
+
+					if (authzStatus == AuthzStatus.ALLOW) {
+						return;
+					}
+
+					final FsAction action;
+
+					if (access != null) {
+						action = access;
+					} else if(parentAccess != null) {
+						action = parentAccess;
+					} else if(ancestorAccess != null) {
+						action = ancestorAccess;
+					} else {
+						action = FsAction.EXECUTE;
+					}
+
+					throw new RangerAccessControlException("Permission denied: user=" + context.user + ", access=" + action + ", inode=\"" + resourcePath + "\"");
+				} else {
+					authzStatus = useDefaultAuthorizerOnly ? AuthzStatus.NOT_DETERMINED : AuthzStatus.ALLOW;
+				}
+
+				if (LOG.isDebugEnabled()) {
+					LOG.debug("OperationOptimizer.optimize() returned null, operationName=" + operationName + " needs to be evaluated!");
+				}
+
+				if (optAuthzContext != null) {
+					access         = optAuthzContext.access;
+					parentAccess   = optAuthzContext.parentAccess;
+					ancestorAccess = optAuthzContext.ancestorAccess;
+				}
+
+				context.isTraverseOnlyCheck = parentAccess == null && ancestorAccess == null && access == null && subAccess == null;
+				context.auditHandler        = doNotGenerateAuditRecord ? null : new RangerHdfsAuditHandler(providedPath, context.isTraverseOnlyCheck, context.plugin.getHadoopModuleName(), context.plugin.getExcludedUsers(), callerContext != null ? callerContext.toString() : null);
 
 				/* Hadoop versions prior to 2.8.0 didn't ask for authorization of parent/ancestor traversal for
@@ -583,10 +646,17 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 					context.auditHandler.flushAudit();
 				}
 
+				if (optAuthzContext != null && optAuthzContext != OPT_BYPASS_AUTHZ) {
+					if (LOG.isDebugEnabled()) {
+						LOG.debug("Updating OptimizedAuthzContext:[" + optAuthzContext + "] with authzStatus=" + authzStatus.name() + "]");
+					}
+
+					optAuthzContext.authzStatus = authzStatus;
+				}
+
 				RangerPerfTracer.log(perf);
 
 				if(LOG.isDebugEnabled()) {
-					LOG.debug("<== RangerAccessControlEnforcer.checkPermission(" + resourcePath + ", " + access + ", user=" + context.user + ") : " + authzStatus);
+					LOG.debug("<== RangerAccessControlEnforcer.checkRangerPermission(" + resourcePath + ", " + access + ", user=" + context.user + ") : " + authzStatus);
 				}
 			}
 		}
@@ -600,7 +670,7 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 
 			if (LOG.isDebugEnabled()) {
 				LOG.debug("==> RangerAccessControlEnforcer.traverseOnlyCheck("
-						+ "path=" + path + ", user=" + context.user + ", groups=" + context.userGroups + ")");
+						+ "path=" + path + ", user=" + context.user + ", groups=" + context.userGroups + ", operationName=" + context.operationName + ")");
 			}
 
 			final AuthzStatus ret;
@@ -628,18 +698,18 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 						resourcePath = resourcePath.substring(0, resourcePath.length()-1);
 					}
 				}
 
-				ret = isAccessAllowedForTraversal(nodeToCheck, nodeAttribs, resourcePath, skipAuditOnAllow, context);
+				ret = isAccessAllowedForTraversal(nodeToCheck, nodeAttribs, resourcePath, skipAuditOnAllow, context, context.operationName);
 			} else {
 				ret = AuthzStatus.ALLOW;
 			}
 
 			if (LOG.isDebugEnabled()) {
 				LOG.debug("<== RangerAccessControlEnforcer.traverseOnlyCheck("
-						+ "path=" + path + ", resourcePath=" + resourcePath + ", user=" + context.user + ", groups=" + context.userGroups + ") : " + ret);
+						+ "path=" + path + ", resourcePath=" + resourcePath + ", user=" + context.user + ", groups=" + context.userGroups + ", operationName=" + context.operationName + ") : " + ret);
 			}
 
 			return ret;
 		}
 
-		private AuthzStatus isAccessAllowedForTraversal(INode inode, INodeAttributes inodeAttribs, String path, boolean skipAuditOnAllow, AuthzContext context) {
+		private AuthzStatus isAccessAllowedForTraversal(INode inode, INodeAttributes inodeAttribs, String path, boolean skipAuditOnAllow, AuthzContext context, String operation) {
 			final AuthzStatus ret;
 			String pathOwner = inodeAttribs != null ? inodeAttribs.getUserName() : null;
 			FsAction access = FsAction.EXECUTE;
@@ -653,10 +723,17 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 			}
 
 			if (LOG.isDebugEnabled()) {
-				LOG.debug("==> RangerAccessControlEnforcer.isAccessAllowedForTraversal(" + path + ", " + access + ", " + context.user + ", " + skipAuditOnAllow + ")");
+				LOG.debug("==> RangerAccessControlEnforcer.isAccessAllowedForTraversal(" + path + ", " + access + ", " + context.user + ", " + skipAuditOnAllow + ", " + context.operationName + ")");
 			}
 
-			RangerHdfsAccessRequest request = new RangerHdfsAccessRequest(inode, path, pathOwner, access, EXECUTE_ACCCESS_TYPE, context.operationName, context.user, context.userGroups);
+			RangerHdfsAccessRequest request = new RangerHdfsAccessRequest(inode, path, pathOwner, access, EXECUTE_ACCCESS_TYPE, operation, context.user, context.userGroups);
+
+			// if the request was already allowed by a Ranger policy (for ancestor/parent/node/child), skip chained plugin evaluations in subsequent calls
+			if (context.isAllowedByRangerPolicies) {
+				LOG.warn("This request is already allowed by Ranger policies. Ensuring that chained-plugins are not evaluated again for this request, request:[" + request + "]");
+
+				RangerAccessRequestUtil.setIsSkipChainedPlugins(request.getContext(), Boolean.TRUE);
+			}
 
 			RangerAccessResult result = context.plugin.isAccessAllowed(request, null);
 
@@ -667,6 +744,13 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 			} else {
 				ret = AuthzStatus.ALLOW;
 			}
+
+			if (ret == AuthzStatus.ALLOW) {
+				if (LOG.isDebugEnabled()) {
+					LOG.debug("This request is for the first time allowed by Ranger policies. request:[" + request + "]");
+				}
+
+				context.isAllowedByRangerPolicies = true;
+			}
 
 			if (ret == AuthzStatus.DENY || (!skipAuditOnAllow && result != null && result.getIsAccessDetermined())) {
 				if (context.auditHandler != null) {
@@ -675,7 +759,7 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 			}
 
 			if (LOG.isDebugEnabled()) {
-				LOG.debug("<== RangerAccessControlEnforcer.isAccessAllowedForTraversal(" + path + ", " + access + ", " + context.user + ", " + skipAuditOnAllow + "): " + ret);
+				LOG.debug("<== RangerAccessControlEnforcer.isAccessAllowedForTraversal(" + path + ", " + access + ", " + context.user + ", " + skipAuditOnAllow + ", " + context.operationName + "): " + ret);
 			}
 
 			return ret;
@@ -801,6 +885,13 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 				RangerAccessRequestUtil.setAllRequestedAccessTypes(request.getContext(), accessTypes);
 			}
 
+			// if the request was already allowed by a Ranger policy (for ancestor/parent/node/child), skip chained plugin evaluations in subsequent calls
+			if (context.isAllowedByRangerPolicies) {
+				LOG.warn("This request is already allowed by Ranger policies. Ensuring that chained-plugins are not evaluated again for this request, request:[" + request + "]");
+
+				RangerAccessRequestUtil.setIsSkipChainedPlugins(request.getContext(), Boolean.TRUE);
+			}
+
 			RangerAccessResult result = context.plugin.isAccessAllowed(request, context.auditHandler);
 
 			context.saveResult(result);
@@ -812,6 +903,13 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 				} else { // allowed
 					ret = AuthzStatus.ALLOW;
 				}
+
+				if (ret == AuthzStatus.ALLOW) {
+					if (LOG.isDebugEnabled()) {
+						LOG.debug("This request is for the first time allowed by Ranger policies. request:[" + request + "]");
+					}
+
+					context.isAllowedByRangerPolicies = true;
+				}
 			}
 
 			if(ret == null) {
@@ -888,7 +986,280 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 			return ret;
 		}
 
+		/*
+		Description : optimize() checks if the given operation is a candidate for optimizing (reducing) the number of times it is authorized.
+		Returns     : null, if the operation, in its current invocation, cannot be optimized.
+		            : OptimizedAuthzContext with the authzStatus set to null, if the operation in its current invocation needs to be authorized. However, the next invocation
+		              for the same user and resource can be optimized based on the result of this authorization.
+		            : OptimizedAuthzContext with the authzStatus set to non-null, if the operation in its current invocation need not be authorized.
+		Algorithm   : The algorithm is based on the specifics of each operation that is potentially optimized.
+			1. OPERATION_NAME_COMPLETEFILE:
+				Skipping this authorization check may break semantic equivalence (according to the HDFS team). Therefore, no optimization
+				is attempted for this operation.
+			2. OPERATION_NAME_DELETE:
+				Namenode calls this twice when deleting a file. The first invocation checks if the user has EXECUTE access on the parent directory, and the second invocation
+				checks if the user has WRITE access on the parent directory as well as ALL access on the directory tree rooted at the parent directory. The first invocation
+				can be optimized away, and the second invocation is authorized with the parent directory access modified from WRITE to WRITE_EXECUTE.
+				Namenode calls this three times when deleting a directory. The optimization eliminates one of the three authorization checks.
+			3. OPERATION_NAME_CREATE, OPERATION_NAME_MKDIRS:
+				Namenode calls this twice when creating a new file or directory. The first invocation checks if the user has EXECUTE access on the parent directory, and the second
+				invocation checks if the user has WRITE access on the parent directory. The optimized code combines these checks into a WRITE_EXECUTE access for the first invocation
+				and optimizes away the second call.
+				Namenode calls this three times when re-creating an existing file. In addition to the two invocations described above, it also checks if the user has
+				WRITE access to the file itself. This extra call is not optimized.
+			4. OPERATION_NAME_RENAME:
+				Namenode calls this twice for each of the source and target directories when renaming a file. For each directory, the first invocation checks if the user has
+				EXECUTE access on the parent directory, and the second invocation checks if the user has WRITE access on the parent (or ancestor, when checking the target
+				directory). The optimized code combines these checks into a WRITE_EXECUTE access for the first invocation and optimizes away the second call.
+			5. OPERATION_NAME_LISTSTATUS, OPERATION_NAME_GETEZFORPATH:
+				Namenode calls this twice when listing a directory or getting the encryption zone for a directory. The first invocation checks if the user has EXECUTE access
+				on the directory, and the second checks if the user has READ_EXECUTE access on the directory. The optimized code combines these checks into a READ_EXECUTE access
+				for the first invocation.
+		 */
+
+
+		class OperationOptimizer {
+			private final String operationName;
+
+			private final byte[][]          components;
+			private final INodeAttributes[] inodeAttrs;
+			private final int               ancestorIndex;
+			private final INode             ancestor;
+			private final INode             parent;
+			private final INode             inode;
+
+			private String   resourcePath;
+			private FsAction ancestorAccess;
+			private FsAction parentAccess;
+			private FsAction access;
+			private final FsAction subAccess;
+
+			OperationOptimizer(String operationName, String resourcePath, FsAction ancestorAccess, FsAction parentAccess, FsAction access, FsAction subAccess, byte[][] components, INodeAttributes[] inodeAttrs, int ancestorIndex, INode ancestor, INode parent, INode inode) {
+				this.operationName = operationName;
+
+				this.resourcePath   = resourcePath;
+				this.ancestorAccess = ancestorAccess;
+				this.parentAccess   = parentAccess;
+				this.access         = access;
+				this.subAccess      = subAccess;
+
+				this.components    = components;
+				this.inodeAttrs    = inodeAttrs;
+				this.ancestorIndex = ancestorIndex;
+				this.ancestor      = ancestor;
+				this.parent        = parent;
+				this.inode         = inode;
+			}
+
+			OptimizedAuthzContext optimize() {
+				if (!AUTHZ_OPTIMIZATION_ENABLED || !OPTIMIZED_OPERATIONS.contains(operationName)) {
+					return null;
+				}
+
+				return optimizeOp(operationName);
+			}
+
+			OptimizedAuthzContext optimizeOp(String operationName) {
+				switch (operationName) {
+					case OPERATION_NAME_CREATE:
+						return optimizeCreateOp();
+					case OPERATION_NAME_DELETE:
+						return optimizeDeleteOp();
+					case OPERATION_NAME_RENAME:
+						return optimizeRenameOp();
+					case OPERATION_NAME_MKDIRS:
+						return optimizeMkdirsOp();
+					case OPERATION_NAME_LISTSTATUS:
+						return optimizeListStatusOp();
+					case OPERATION_NAME_GETEZFORPATH:
+						return optimizeGetEZForPathOp();
+					default:
+						break;
+				}
+
+				return null;
+			}
+
+			private OptimizedAuthzContext optimizeCreateOp() {
+				INode nodeToAuthorize = getINodeToAuthorize();
+
+				if (nodeToAuthorize == null) {
+					return OPT_BYPASS_AUTHZ;
+				}
+
+				if (!nodeToAuthorize.isDirectory() && access == null) { // If not a directory, the access must be non-null as when recreating existing file
+					if (LOG.isDebugEnabled()) {
+						LOG.debug("nodeToCheck is not a directory and access is null for a create operation! Optimization skipped");
+					}
+
+					return null;
+				}
+
+				return getOrCreateOptimizedAuthzContext();
+			}
+
+			private OptimizedAuthzContext optimizeDeleteOp() {
+				int numOfRequestedAccesses = 0;
+
+				if (ancestorAccess != null) numOfRequestedAccesses++;
+				if (parentAccess != null)   numOfRequestedAccesses++;
+				if (access != null)         numOfRequestedAccesses++;
+				if (subAccess != null)      numOfRequestedAccesses++;
+
+				if (numOfRequestedAccesses == 0) {
+					return OPT_BYPASS_AUTHZ;
+				} else {
+					parentAccess = FsAction.WRITE_EXECUTE;
+
+					return getOrCreateOptimizedAuthzContext();
+				}
+			}
+
+			private OptimizedAuthzContext optimizeRenameOp() {
+				INode nodeToAuthorize = getINodeToAuthorize();
+
+				if (nodeToAuthorize == null) {
+					return OPT_BYPASS_AUTHZ;
+				}
+
+				if (!nodeToAuthorize.isDirectory()) {
+					if (LOG.isDebugEnabled()) {
+						LOG.debug("nodeToCheck is not a directory for a rename operation! Optimization skipped");
+					}
+
+					return null;
+				}
+
+				return getOrCreateOptimizedAuthzContext();
+			}
+
+			private OptimizedAuthzContext optimizeMkdirsOp() {
+				INode nodeToAuthorize = getINodeToAuthorize();
+
+				if (nodeToAuthorize == null) {
+					return OPT_BYPASS_AUTHZ;
+				}
+
+				if (!nodeToAuthorize.isDirectory()) {
+					if (LOG.isDebugEnabled()) {
+						LOG.debug("nodeToCheck is not a directory for a mkdirs operation! Optimization skipped");
+					}
+
+					return null;
+				}
+
+				return getOrCreateOptimizedAuthzContext();
+			}
+
+			private OptimizedAuthzContext optimizeListStatusOp() {
+				if (inode == null || inode.isFile()) {
+					if (LOG.isDebugEnabled()) {
+						LOG.debug("inode is null or is a file for a listStatus/getEZForPath operation! Optimization skipped");
+					}
+
+					return null;
+				} else {
+					if (resourcePath.length() > 1) {
+						if (resourcePath.endsWith(HDFS_ROOT_FOLDER_PATH)) {
+							resourcePath = resourcePath.substring(0, resourcePath.length() - 1);
+						}
+					}
+
+					access = FsAction.READ_EXECUTE;
+
+					return getOrCreateOptimizedAuthzContext();
+				}
+			}
+
+			private OptimizedAuthzContext optimizeGetEZForPathOp() {
+				if (inode == null || inode.isFile()) {
+					if (LOG.isDebugEnabled()) {
+						LOG.debug("inode is null or is a file for a listStatus/getEZForPath operation! Optimization skipped");
+					}
+
+					return null;
+				} else {
+					access = FsAction.READ_EXECUTE;
+
+					return getOrCreateOptimizedAuthzContext();
+				}
+			}
+
+			private INode getINodeToAuthorize() {
+				INode ret = null;
+
+				INode nodeToAuthorize = inode;
+
+				if (nodeToAuthorize == null || nodeToAuthorize.isFile()) {
+					// Case where the authorizer is called to authorize re-creation of an existing file. This is to check if the file itself is write-able
+
+					if (StringUtils.equals(operationName, OPERATION_NAME_CREATE) && inode != null && access != null) {
+						if (LOG.isDebugEnabled()) {
+							LOG.debug("Create operation with non-null access is being authorized. authorize for write access for the file!!");
+						}
+					} else {
+						if (parent != null) {
+							nodeToAuthorize = parent;
+							resourcePath    = inodeAttrs.length > 0 ? DFSUtil.byteArray2PathString(components, 0, inodeAttrs.length - 1) : HDFS_ROOT_FOLDER_PATH;
+							parentAccess    = FsAction.WRITE_EXECUTE;
+						} else if (ancestor != null) {
+							INodeAttributes nodeAttribs = inodeAttrs.length > ancestorIndex ? inodeAttrs[ancestorIndex] : null;
+
+							nodeToAuthorize = ancestor;
+							resourcePath    = nodeAttribs != null ? DFSUtil.byteArray2PathString(components, 0, ancestorIndex + 1) : HDFS_ROOT_FOLDER_PATH;
+							ancestorAccess  = FsAction.WRITE_EXECUTE;
+						}
+
+						if (resourcePath.length() > 1) {
+							if (resourcePath.endsWith(HDFS_ROOT_FOLDER_PATH)) {
+								resourcePath = resourcePath.substring(0, resourcePath.length() - 1);
+							}
+						}
+					}
+
+					ret = nodeToAuthorize;
+				} else {
+					if (LOG.isDebugEnabled()) {
+						LOG.debug("inode is not null and it is not a file for a create/rename/mkdirs operation! Optimization skipped");
+					}
+				}
+
+				return ret;
+			}
+
+			private OptimizedAuthzContext getOrCreateOptimizedAuthzContext() {
+				if (CACHE == null) {
+					CACHE = new HashMap<>();
+				}
+
+				OptimizedAuthzContext opContext = CACHE.get(resourcePath);
+
+				if (opContext == null) {
+					opContext = new OptimizedAuthzContext(resourcePath, ancestorAccess, parentAccess, access, null);
+
+					CACHE.put(resourcePath, opContext);
+
+					if (LOG.isDebugEnabled()) {
+						LOG.debug("Added OptimizedAuthzContext:[" + opContext + "] to cache");
+					}
+				}
+
+				return opContext;
+			}
+		}
 	}
 
+	static class OptimizedAuthzContext {
+		private final String   path;
+		private final FsAction ancestorAccess;
+		private final FsAction parentAccess;
+		private final FsAction access;
+		private AuthzStatus    authzStatus;
+
+		OptimizedAuthzContext(String path, FsAction ancestorAccess, FsAction parentAccess, FsAction access, AuthzStatus authzStatus) {
+			this.path           = path;
+			this.ancestorAccess = ancestorAccess;
+			this.parentAccess   = parentAccess;
+			this.access         = access;
+			this.authzStatus    = authzStatus;
+		}
+
+		@Override
+		public String toString() {
+			return "path=" + path + ", authzStatus=" + authzStatus;
+		}
+	}
 
 }
@@ -1051,7 +1422,8 @@ class AuthzContext {
 	public final String      user;
 	public final Set<String> userGroups;
 	public final String      operationName;
-	public final boolean     isTraverseOnlyCheck;
+	public boolean isTraverseOnlyCheck;
+	public boolean isAllowedByRangerPolicies;
 
 	public RangerHdfsAuditHandler auditHandler = null;
 	private RangerAccessResult    lastResult   = null;
@@ -1252,4 +1624,3 @@ class RangerHdfsAuditHandler extends RangerDefaultAuditHandler {
 		}
 	}
 }
-
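Taken together, OperationOptimizer and the per-enforcer CACHE added above implement a small memoization keyed by resource path, following the per-operation rules spelled out in the optimize() comment block: the first checkPermission call for an optimizable operation is widened (for example, WRITE on the parent directory becomes WRITE_EXECUTE), its outcome is recorded in an OptimizedAuthzContext, and the NameNode's follow-up call for the same path is answered from that record. A condensed sketch of the control flow, with evaluateRangerPolicies standing in for the full policy-evaluation path (it is not a method in this commit):

    OptimizedAuthzContext ctx = new OperationOptimizer(/* operation, path, accesses, inodes */).optimize();

    if (ctx == OPT_BYPASS_AUTHZ) {
        return;                                 // nothing was requested; skip authorization entirely
    } else if (ctx != null && ctx.authzStatus != null) {
        if (ctx.authzStatus == AuthzStatus.ALLOW) {
            return;                             // repeat call: reuse the recorded result
        }

        throw new RangerAccessControlException("Permission denied: ...");
    }

    AuthzStatus authzStatus = evaluateRangerPolicies();  // first call: full (widened) evaluation

    if (ctx != null && ctx != OPT_BYPASS_AUTHZ) {
        ctx.authzStatus = authzStatus;          // remember the outcome for the follow-up call
    }

Since CACHE hangs off the RangerAccessControlEnforcer instance, a memoized result lives only as long as that enforcer, so entries cannot leak across unrelated requests.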
diff --git a/ranger-hdfs-plugin-shim/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java b/ranger-hdfs-plugin-shim/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java
index 22d0b450d..b8adc3fd0 100644
--- a/ranger-hdfs-plugin-shim/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java
+++ b/ranger-hdfs-plugin-shim/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java
@@ -34,7 +34,7 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 
 	private INodeAttributeProvider  rangerHdfsAuthorizerImpl = null;
 	private RangerPluginClassLoader rangerPluginClassLoader  = null;
-	
+
 	public RangerHdfsAuthorizer() {
 		if(LOG.isDebugEnabled()) {
 			LOG.debug("==> RangerHdfsAuthorizer.RangerHdfsAuthorizer()");
@@ -80,13 +80,7 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 			LOG.debug("==> RangerHdfsAuthorizer.start()");
 		}
 
-		try {
-			activatePluginClassLoader();
-
-			rangerHdfsAuthorizerImpl.start();
-		} finally {
-			deactivatePluginClassLoader();
-		}
+		rangerHdfsAuthorizerImpl.start();
 
 		if(LOG.isDebugEnabled()) {
 			LOG.debug("<== RangerHdfsAuthorizer.start()");
@@ -99,13 +93,7 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 			LOG.debug("==> RangerHdfsAuthorizer.stop()");
 		}
 
-		try {
-			activatePluginClassLoader();
-
-			rangerHdfsAuthorizerImpl.stop();
-		} finally {
-			deactivatePluginClassLoader();
-		}
+		rangerHdfsAuthorizerImpl.stop();
 
 		if(LOG.isDebugEnabled()) {
 			LOG.debug("<== RangerHdfsAuthorizer.stop()");
@@ -114,65 +102,17 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 
 	@Override
 	public INodeAttributes getAttributes(String fullPath, INodeAttributes inode) {
-		if(LOG.isDebugEnabled()) {
-			LOG.debug("==> RangerHdfsAuthorizer.getAttributes(" + fullPath + ")");
-		}
-
-		INodeAttributes ret = null;
-
-		try {
-			activatePluginClassLoader();
-
-			ret = rangerHdfsAuthorizerImpl.getAttributes(fullPath,inode); // return default attributes
-		} finally {
-			deactivatePluginClassLoader();
-		}
-
-		if(LOG.isDebugEnabled()) {
-			LOG.debug("<== RangerHdfsAuthorizer.getAttributes(" + fullPath + "): " + ret);
-		}
-
-		return ret;
+		return rangerHdfsAuthorizerImpl.getAttributes(fullPath,inode); // return default attributes
 	}
 
 	@Override
 	public INodeAttributes getAttributes(String[] pathElements, INodeAttributes inode) {
-		if(LOG.isDebugEnabled()) {
-			LOG.debug("==> RangerHdfsAuthorizer.getAttributes(pathElementsCount=" + (pathElements == null ? 0 : pathElements.length) + ")");
-		}
-
-		INodeAttributes ret = null;
-
-		try {
-			activatePluginClassLoader();
-
-			ret = rangerHdfsAuthorizerImpl.getAttributes(pathElements,inode);
-		} finally {
-			deactivatePluginClassLoader();
-		}
-
-		if(LOG.isDebugEnabled()) {
-			LOG.debug("<== RangerHdfsAuthorizer.getAttributes(pathElementsCount=" + (pathElements == null ? 0 : pathElements.length) + "): " + ret);
-		}
-
-		return ret;
+		return rangerHdfsAuthorizerImpl.getAttributes(pathElements,inode);
 	}
 
 	@Override
 	public AccessControlEnforcer getExternalAccessControlEnforcer(AccessControlEnforcer defaultEnforcer) {
-		if(LOG.isDebugEnabled()) {
-			LOG.debug("==> RangerHdfsAuthorizer.getExternalAccessControlEnforcer()");
-		}
-
-		AccessControlEnforcer ret = null;
-
-		ret = rangerHdfsAuthorizerImpl.getExternalAccessControlEnforcer(defaultEnforcer);
-
-		if(LOG.isDebugEnabled()) {
-			LOG.debug("<== RangerHdfsAuthorizer.getExternalAccessControlEnforcer()");
-		}
-
-		return ret;
+		return rangerHdfsAuthorizerImpl.getExternalAccessControlEnforcer(defaultEnforcer);
 	}
 
 	private void activatePluginClassLoader() {
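A deployment note: although the AUTHZ_OPTIMIZATION_ENABLED field is initialized to true, start() immediately overwrites it from configuration with a default of false, so the combined-authorization path stays off unless explicitly enabled. Enabling it means setting the property introduced by this commit in the HDFS plugin's configuration; the property name comes from the commit, while placing it in ranger-hdfs-site.xml (rather than another Hadoop config file picked up by the plugin) is an assumption:

    <property>
      <name>ranger.hdfs.authz.enable.optimization</name>
      <value>true</value>
    </property>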