Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 6e7b7d4ca -> f85bf4cb9


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f85bf4cb/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
new file mode 100644
index 0000000..314d00c
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -0,0 +1,637 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.coprocessor;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.AuthUtil;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.ObserverContextImpl;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
+import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
+import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
+import org.apache.hadoop.hbase.security.AccessDeniedException;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.access.AccessControlClient;
+import org.apache.hadoop.hbase.security.access.AccessControlUtil;
+import org.apache.hadoop.hbase.security.access.AuthResult;
+import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.security.access.UserPermission;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+
+import com.google.common.collect.Lists;
+import com.google.protobuf.RpcCallback;
+
+public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
+
+    private PhoenixMetaDataControllerEnvironment env;
+    // volatile: lazily initialized via double-checked locking in getAccessControllers()
+    private volatile ArrayList<MasterObserver> accessControllers;
+    private boolean accessCheckEnabled;
+    private UserProvider userProvider;
+    private boolean isAutomaticGrantEnabled;
+    private boolean isStrictMode;
+    public static final Log LOG = LogFactory.getLog(PhoenixAccessController.class);
+    private static final Log AUDITLOG =
+            LogFactory.getLog("SecurityLogger." + PhoenixAccessController.class.getName());
+
+    @Override
+    public Optional<MetaDataEndpointObserver> getPhoenixObserver() {
+        return Optional.of(this);
+    }
+    
+    private List<MasterObserver> getAccessControllers() throws IOException {
+        if (accessControllers == null) {
+            synchronized (this) {
+                if (accessControllers == null) {
+                    // Build the list locally and publish it only once fully populated,
+                    // so concurrent readers never observe a partially filled list.
+                    ArrayList<MasterObserver> controllers = new ArrayList<MasterObserver>();
+                    RegionCoprocessorHost cpHost = this.env.getCoprocessorHost();
+                    for (RegionCoprocessor cp : cpHost.findCoprocessors(RegionCoprocessor.class)) {
+                        if (cp instanceof AccessControlService.Interface && cp instanceof MasterObserver) {
+                            controllers.add((MasterObserver)cp);
+                        }
+                    }
+                    accessControllers = controllers;
+                }
+            }
+        }
+        return accessControllers;
+    }
+
+    public ObserverContext<MasterCoprocessorEnvironment> getMasterObserverContext() throws IOException {
+        return new ObserverContextImpl<MasterCoprocessorEnvironment>(getActiveUser());
+    }
+    
+    @Override
+    public void preGetTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
+            String tableName, TableName physicalTableName) throws IOException {
+        for (MasterObserver observer : getAccessControllers()) {
+            observer.preGetTableDescriptors(getMasterObserverContext(), Lists.newArrayList(physicalTableName),
+                    Collections.<TableDescriptor> emptyList(), null);
+        }
+    }
+
+    @Override
+    public void start(CoprocessorEnvironment env) throws IOException {
+        Configuration conf = env.getConfiguration();
+        this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
+                QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
+        this.isAutomaticGrantEnabled = conf.getBoolean(QueryServices.PHOENIX_AUTOMATIC_GRANT_ENABLED,
+                QueryServicesOptions.DEFAULT_PHOENIX_AUTOMATIC_GRANT_ENABLED);
+        if (!this.accessCheckEnabled) {
+            LOG.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
+        }
+        if (env instanceof PhoenixMetaDataControllerEnvironment) {
+            this.env = (PhoenixMetaDataControllerEnvironment)env;
+        } else {
+            throw new IllegalArgumentException(
+                    "Not a valid environment, should be loaded by PhoenixMetaDataControllerEnvironment");
+        }
+        // set the user provider
+        this.userProvider = UserProvider.instantiate(env.getConfiguration());
+        this.isStrictMode = conf.getBoolean(QueryServices.PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED,
+                QueryServicesOptions.DEFAULT_PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED);
+        // init superusers and add the server principal (if using security)
+        // or process owner as default super user.
+        Superusers.initialize(env.getConfiguration());
+    }
+
+    @Override
+    public void stop(CoprocessorEnvironment env) throws IOException {}
+
+    @Override
+    public void preCreateTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
+            String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType,
+            Set<byte[]> familySet, Set<TableName> indexes) throws IOException {
+        if (!accessCheckEnabled) { return; }
+
+        if (tableType != PTableType.VIEW) {
+            TableDescriptorBuilder tableDescBuilder = TableDescriptorBuilder.newBuilder(physicalTableName);
+            for (byte[] familyName : familySet) {
+                tableDescBuilder.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(familyName).build());
+            }
+            final TableDescriptor htd = tableDescBuilder.build();
+            for (MasterObserver observer : getAccessControllers()) {
+                observer.preCreateTable(getMasterObserverContext(), htd, null);
+            }
+        }
+
+        // Indexes and views require READ access on the parent physical table.
+        Set<TableName> physicalTablesChecked = new HashSet<TableName>();
+        if (tableType == PTableType.VIEW || tableType == PTableType.INDEX) {
+            physicalTablesChecked.add(parentPhysicalTableName);
+            requireAccess("Create" + tableType, parentPhysicalTableName, Action.READ, Action.EXEC);
+        }
+
+        if (tableType == PTableType.VIEW) {
+            Action[] requiredActions = { Action.READ, Action.EXEC };
+            for (TableName index : indexes) {
+                if (!physicalTablesChecked.add(index)) {
+                    // Skip the check for local indexes, whose ACLs were already checked above,
+                    // and for physical tables seen multiple times (e.g., a shared view index table).
+                    continue;
+                }
+
+                User user = getActiveUser();
+                List<UserPermission> permissionForUser = getPermissionForUser(
+                        getUserPermissions(index.getNameAsString()), Bytes.toBytes(user.getShortName()));
+                Set<Action> requireAccess = new HashSet<>();
+                Set<Action> accessExists = new HashSet<>();
+                if (permissionForUser != null) {
+                    for (UserPermission userPermission : permissionForUser) {
+                        for (Action action : Arrays.asList(requiredActions)) {
+                            if (!userPermission.implies(action)) {
+                                requireAccess.add(action);
+                            }
+                        }
+                    }
+                    if (!requireAccess.isEmpty()) {
+                        for (UserPermission userPermission : permissionForUser) {
+                            accessExists.addAll(Arrays.asList(userPermission.getActions()));
+                        }
+                    }
+                } else {
+                    requireAccess.addAll(Arrays.asList(requiredActions));
+                }
+                if (!requireAccess.isEmpty()) {
+                    byte[] indexPhysicalTable = index.getName();
+                    handleRequireAccessOnDependentTable("Create" + tableType, user.getName(),
+                            TableName.valueOf(indexPhysicalTable), tableName, requireAccess, accessExists);
+                }
+            }
+        }
+
+        if (tableType == PTableType.INDEX) {
+            // All users who have READ access on the data table should get access to the index table as
+            // well, so that existing users querying the data table do not see access denied for newly
+            // created indexes. WRITE is needed for the index updates done by users who have WRITE access
+            // on the data table, and CREATE is needed during drop of the table.
+            // TODO: confirm whether granting permissions from a coprocessor is a security leak
+            // (currently this is only done if automatic grant is explicitly enabled in configuration).
+            // Skip the check for local indexes.
+            if (physicalTableName != null && !parentPhysicalTableName.equals(physicalTableName)
+                    && !MetaDataUtil.isViewIndex(physicalTableName.getNameAsString())) {
+                authorizeOrGrantAccessToUsers("Create" + tableType, parentPhysicalTableName,
+                        Arrays.asList(Action.READ, Action.WRITE, Action.CREATE, Action.EXEC, Action.ADMIN),
+                        physicalTableName);
+            }
+        }
+    }
+
+    
+    public void handleRequireAccessOnDependentTable(String request, String userName, TableName dependentTable,
+            String requestTable, Set<Action> requireAccess, Set<Action> accessExists) throws IOException {
+
+        if (!isStrictMode) {
+            AUDITLOG.warn("Strict mode is not enabled, so " + request + " is allowed but User:" + userName
+                    + " will not have the following access " + requireAccess
+                    + " to the existing dependent physical table " + dependentTable);
+            return;
+        }
+        if (isAutomaticGrantEnabled) {
+            Set<Action> unionSet = new HashSet<Action>();
+            unionSet.addAll(requireAccess);
+            unionSet.addAll(accessExists);
+            AUDITLOG.info(request + ": Automatically granting access to index table during creation of view:"
+                    + requestTable + authString(userName, dependentTable, requireAccess));
+            grantPermissions(userName, dependentTable.getName(), unionSet.toArray(new Action[0]));
+        } else {
+            throw new AccessDeniedException("Insufficient permissions for users of dependent table"
+                    + authString(userName, dependentTable, requireAccess));
+        }
+    }
+    
+    private void grantPermissions(final String toUser, final byte[] table, final Action... actions) throws IOException {
+        User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+                try (Connection conn = ConnectionFactory.createConnection(env.getConfiguration())) {
+                    AccessControlClient.grant(conn, TableName.valueOf(table), toUser, null, null, actions);
+                } catch (Throwable e) {
+                    // Rethrow so a failed grant is not silently swallowed.
+                    throw new DoNotRetryIOException(e);
+                }
+                return null;
+            }
+        });
+    }
+
+    private void authorizeOrGrantAccessToUsers(final String request, final TableName fromTable,
+            final List<Action> requiredActionsOnTable, final TableName toTable) throws IOException {
+        User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws IOException {
+                try (Connection conn = ConnectionFactory.createConnection(env.getConfiguration())) {
+                    List<UserPermission> userPermissions = getUserPermissions(fromTable.getNameAsString());
+                    List<UserPermission> permissionsOnTheTable = getUserPermissions(toTable.getNameAsString());
+                    if (userPermissions != null) {
+                        for (UserPermission userPermission : userPermissions) {
+                            Set<Action> requireAccess = new HashSet<Action>();
+                            Set<Action> accessExists = new HashSet<Action>();
+                            List<UserPermission> permsToTable = getPermissionForUser(permissionsOnTheTable,
+                                    userPermission.getUser());
+                            for (Action action : requiredActionsOnTable) {
+                                boolean haveAccess = false;
+                                if (userPermission.implies(action)) {
+                                    if (permsToTable == null) {
+                                        requireAccess.add(action);
+                                    } else {
+                                        for (UserPermission permToTable : permsToTable) {
+                                            if (permToTable.implies(action)) {
+                                                haveAccess = true;
+                                            }
+                                        }
+                                        if (!haveAccess) {
+                                            requireAccess.add(action);
+                                        }
+                                    }
+                                }
+                            }
+                            if (permsToTable != null) {
+                                // Append the access the user already has on the target table
+                                for (UserPermission permToTable : permsToTable) {
+                                    accessExists.addAll(Arrays.asList(permToTable.getActions()));
+                                }
+                            }
+                            if (!requireAccess.isEmpty()) {
+                                if (AuthUtil.isGroupPrincipal(Bytes.toString(userPermission.getUser()))) {
+                                    AUDITLOG.warn("Users of GROUP:" + Bytes.toString(userPermission.getUser())
+                                            + " will not have the following access " + requireAccess
+                                            + " to the newly created index " + toTable
+                                            + "; automatic grant is not yet allowed on groups");
+                                    continue;
+                                }
+                                handleRequireAccessOnDependentTable(request, Bytes.toString(userPermission.getUser()),
+                                        toTable, toTable.getNameAsString(), requireAccess, accessExists);
+                            }
+                        }
+                    }
+                }
+                return null;
+            }
+        });
+    }
+
+    private List<UserPermission> getPermissionForUser(List<UserPermission> perms, byte[] user) {
+        if (perms != null) {
+            // Collect all permissions for the user, as multiple implementations of the AccessControl
+            // coprocessor may each report permissions for the same user.
+            List<UserPermission> permissions = new ArrayList<>();
+            for (UserPermission p : perms) {
+                if (Bytes.equals(p.getUser(), user)) {
+                    permissions.add(p);
+                }
+            }
+            if (!permissions.isEmpty()) {
+                return permissions;
+            }
+        }
+        return null;
+    }
+
+    @Override
+    public void preDropTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
+            String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType,
+            List<PTable> indexes) throws IOException {
+        if (!accessCheckEnabled) { return; }
+
+        for (MasterObserver observer : getAccessControllers()) {
+            if (tableType != PTableType.VIEW) {
+                observer.preDeleteTable(getMasterObserverContext(), physicalTableName);
+            }
+            if (indexes != null) {
+                for (PTable index : indexes) {
+                    observer.preDeleteTable(getMasterObserverContext(),
+                            TableName.valueOf(index.getPhysicalName().getBytes()));
+                }
+            }
+        }
+        // Check the same permissions that were checked during creation of the view.
+        if (tableType == PTableType.VIEW || tableType == PTableType.INDEX) {
+            requireAccess("Drop " + tableType, parentPhysicalTableName, Action.READ, Action.EXEC);
+        }
+    }
+
+    @Override
+    public void preAlterTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
+            String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType)
+            throws IOException {
+        if (!accessCheckEnabled) { return; }
+        for (MasterObserver observer : getAccessControllers()) {
+            if (tableType != PTableType.VIEW) {
+                observer.preModifyTable(getMasterObserverContext(), physicalTableName,
+                        TableDescriptorBuilder.newBuilder(physicalTableName).build());
+            }
+        }
+        if (tableType == PTableType.VIEW) {
+            requireAccess("Alter " + tableType, parentPhysicalTableName, Action.READ, Action.EXEC);
+        }
+    }
+
+    @Override
+    public void preGetSchema(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String schemaName)
+            throws IOException {
+        if (!accessCheckEnabled) { return; }
+        for (MasterObserver observer : getAccessControllers()) {
+            observer.preListNamespaceDescriptors(getMasterObserverContext(),
+                    Arrays.asList(NamespaceDescriptor.create(schemaName).build()));
+        }
+    }
+
+    @Override
+    public void preCreateSchema(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String schemaName)
+            throws IOException {
+        if (!accessCheckEnabled) { return; }
+        for (MasterObserver observer : getAccessControllers()) {
+            observer.preCreateNamespace(getMasterObserverContext(),
+                    NamespaceDescriptor.create(schemaName).build());
+        }
+    }
+
+    @Override
+    public void preDropSchema(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String schemaName)
+            throws IOException {
+        if (!accessCheckEnabled) { return; }
+        for (MasterObserver observer : getAccessControllers()) {
+            observer.preDeleteNamespace(getMasterObserverContext(), schemaName);
+        }
+    }
+
+    @Override
+    public void preIndexUpdate(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
+            String indexName, TableName physicalTableName, TableName parentPhysicalTableName, PIndexState newState)
+            throws IOException {
+        if (!accessCheckEnabled) { return; }
+        for (MasterObserver observer : getAccessControllers()) {
+            observer.preModifyTable(getMasterObserverContext(), physicalTableName,
+                    TableDescriptorBuilder.newBuilder(physicalTableName).build());
+        }
+        // Check for read access in case of a rebuild
+        if (newState == PIndexState.BUILDING) {
+            requireAccess("Rebuild:", parentPhysicalTableName, Action.READ, Action.EXEC);
+        }
+    }
+
+    private List<UserPermission> getUserPermissions(final String tableName) throws IOException {
+        return User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {
+            @Override
+            public List<UserPermission> run() throws Exception {
+                final List<UserPermission> userPermissions = new ArrayList<UserPermission>();
+                try (Connection connection = ConnectionFactory.createConnection(env.getConfiguration())) {
+                    // Use the accessor so the controller list is lazily initialized if needed.
+                    for (MasterObserver service : getAccessControllers()) {
+                        if (service.getClass().getName().equals(
+                                org.apache.hadoop.hbase.security.access.AccessController.class.getName())) {
+                            userPermissions.addAll(AccessControlClient.getUserPermissions(connection, tableName));
+                        } else {
+                            AccessControlProtos.GetUserPermissionsRequest.Builder builder =
+                                    AccessControlProtos.GetUserPermissionsRequest.newBuilder();
+                            builder.setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf(tableName)));
+                            builder.setType(AccessControlProtos.Permission.Type.Table);
+                            AccessControlProtos.GetUserPermissionsRequest request = builder.build();
+
+                            ((AccessControlService.Interface)service).getUserPermissions(null, request,
+                                    new RpcCallback<AccessControlProtos.GetUserPermissionsResponse>() {
+                                        @Override
+                                        public void run(AccessControlProtos.GetUserPermissionsResponse message) {
+                                            if (message != null) {
+                                                for (AccessControlProtos.UserPermission perm : message
+                                                        .getUserPermissionList()) {
+                                                    userPermissions.add(AccessControlUtil.toUserPermission(perm));
+                                                }
+                                            }
+                                        }
+                                    });
+                        }
+                    }
+                } catch (Throwable e) {
+                    if (e instanceof Exception) {
+                        throw (Exception) e;
+                    } else if (e instanceof Error) {
+                        throw (Error) e;
+                    }
+                    throw new Exception(e);
+                }
+                return userPermissions;
+            }
+        });
+    }
+    
+    /**
+     * Authorizes that the current user has all the given permissions for the
+     * given table.
+     * @param tableName Table requested
+     * @throws IOException if obtaining the current user fails
+     * @throws AccessDeniedException if user has no authorization
+     */
+    private void requireAccess(String request, TableName tableName, Action... permissions) throws IOException {
+        User user = getActiveUser();
+        AuthResult result = null;
+        List<Action> requiredAccess = new ArrayList<Action>();
+        for (Action permission : permissions) {
+            if (hasAccess(getUserPermissions(tableName.getNameAsString()), tableName, permission, user)) {
+                result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, null, null);
+            } else {
+                result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, null, null);
+                requiredAccess.add(permission);
+            }
+            logResult(result);
+        }
+        if (!requiredAccess.isEmpty()) {
+            result = AuthResult.deny(request, "Insufficient permissions", user, requiredAccess.get(0), tableName, null,
+                    null);
+        }
+        if (!result.isAllowed()) {
+            throw new AccessDeniedException("Insufficient permissions "
+                    + authString(user.getName(), tableName, new HashSet<Permission.Action>(Arrays.asList(permissions))));
+        }
+    }
+
+    /**
+     * Checks whether the user has access to the table for the specified action.
+     *
+     * @param perms All table permissions
+     * @param table table name
+     * @param action the action for which access is required
+     * @return true if the user has access to the table for the specified action, false otherwise
+     */
+    private boolean hasAccess(List<UserPermission> perms, TableName table, Permission.Action action, User user) {
+        if (Superusers.isSuperUser(user)) {
+            return true;
+        }
+        if (perms != null) {
+            List<UserPermission> permissionsForUser = getPermissionForUser(perms, user.getShortName().getBytes());
+            if (permissionsForUser != null) {
+                for (UserPermission permissionForUser : permissionsForUser) {
+                    if (permissionForUser.implies(action)) { return true; }
+                }
+            }
+            String[] groupNames = user.getGroupNames();
+            if (groupNames != null) {
+                for (String group : groupNames) {
+                    List<UserPermission> groupPerms = getPermissionForUser(perms, (AuthUtil.toGroupEntry(group)).getBytes());
+                    if (groupPerms != null) {
+                        for (UserPermission permissionForUser : groupPerms) {
+                            if (permissionForUser.implies(action)) { return true; }
+                        }
+                    }
+                }
+            }
+        } else if (LOG.isDebugEnabled()) {
+            LOG.debug("No permissions found for table=" + table);
+        }
+        return false;
+    }
+
+    private User getActiveUser() throws IOException {
+        Optional<User> user = RpcServer.getRequestUser();
+        if (!user.isPresent()) {
+            // for non-rpc handling, fallback to system user
+            return userProvider.getCurrent();
+        }
+        return user.get();
+    }
+
+    private void logResult(AuthResult result) {
+        if (AUDITLOG.isTraceEnabled()) {
+            Optional<InetAddress> remoteAddr = RpcServer.getRemoteAddress();
+            AUDITLOG.trace("Access " + (result.isAllowed() ? "allowed" : "denied") + " for user "
+                    + (result.getUser() != null ? result.getUser().getShortName() : "UNKNOWN") + "; reason: "
+                    + result.getReason() + "; remote address: " + (remoteAddr.isPresent() ? remoteAddr.get() : "")
+                    + "; request: " + result.getRequest() + "; context: " + result.toContextString());
+        }
+    }
+
+    private static final class Superusers {
+        private static final Log LOG = LogFactory.getLog(Superusers.class);
+
+        /** Configuration key for superusers */
+        public static final String SUPERUSER_CONF_KEY = org.apache.hadoop.hbase.security.Superusers.SUPERUSER_CONF_KEY;
+
+        private static List<String> superUsers;
+        private static List<String> superGroups;
+        private static User systemUser;
+
+        private Superusers(){}
+
+        /**
+         * Should be called only once, to pre-load the lists of superusers and super
+         * groups from the Configuration. This operation is idempotent.
+         * @param conf configuration to load users from
+         * @throws IOException if unable to initialize the lists of superusers or super groups
+         * @throws IllegalStateException if the current user is null
+         */
+        public static void initialize(Configuration conf) throws IOException {
+            superUsers = new ArrayList<>();
+            superGroups = new ArrayList<>();
+            systemUser = User.getCurrent();
+
+            if (systemUser == null) {
+                throw new IllegalStateException("Unable to obtain the current user, "
+                    + "authorization checks for internal operations will not work correctly!");
+            }
+
+            if (LOG.isTraceEnabled()) {
+                LOG.trace("Current user name is " + systemUser.getShortName());
+            }
+            String currentUser = systemUser.getShortName();
+            String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]);
+            for (String name : superUserList) {
+                if (AuthUtil.isGroupPrincipal(name)) {
+                    superGroups.add(AuthUtil.getGroupName(name));
+                } else {
+                    superUsers.add(name);
+                }
+            }
+            superUsers.add(currentUser);
+        }
+
+        /**
+         * @return true if the given user is a super user (whether as the user running the process,
+         * declared as an individual superuser, or a member of a super group), false otherwise.
+         * @param user the user to check
+         * @throws IllegalStateException if the lists of superusers/super groups
+         *   haven't been initialized properly
+         */
+        public static boolean isSuperUser(User user) {
+            if (superUsers == null) {
+                throw new IllegalStateException("Super users/super groups lists"
+                    + " haven't been initialized properly.");
+            }
+            if (superUsers.contains(user.getShortName())) {
+                return true;
+            }
+
+            for (String group : user.getGroupNames()) {
+                if (superGroups.contains(group)) {
+                    return true;
+                }
+            }
+            return false;
+        }
+
+        public static List<String> getSuperUsers() {
+            return superUsers;
+        }
+
+        public static User getSystemUser() {
+            return systemUser;
+        }
+    }
+    
+    public String authString(String user, TableName table, Set<Action> actions) {
+        StringBuilder sb = new StringBuilder();
+        sb.append(" (user=").append(user != null ? user : "UNKNOWN").append(", ");
+        sb.append("scope=").append(table == null ? "GLOBAL" : table.getNameWithNamespaceInclAsString()).append(", ");
+        sb.append(actions != null && actions.size() > 1 ? "actions=" : "action=")
+                .append(actions != null ? actions.toString() : "").append(")");
+        return sb.toString();
+    }
+
+}
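
A note on configuration: the controller above is inert unless phoenix.acls.enabled is set. As a minimal sketch (not part of this commit; the class and variable names are illustrative), the three keys introduced here could be set on a server-side Configuration like so:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class PhoenixAclConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Keys introduced by this commit (see the QueryServices.java hunk below).
            conf.setBoolean("phoenix.acls.enabled", true);                      // enable metadata ACL checks
            conf.setBoolean("phoenix.security.automatic.grant.enabled", false); // no automatic grants on index creation
            conf.setBoolean("phoenix.security.strict.mode.enabled", true);      // deny when dependent-table ACLs are missing
            System.out.println("ACL checks on: " + conf.getBoolean("phoenix.acls.enabled", false));
        }
    }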

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f85bf4cb/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixCoprocessor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixCoprocessor.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixCoprocessor.java
new file mode 100644
index 0000000..d993c69
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixCoprocessor.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.coprocessor;
+
+import java.util.Optional;
+
+import org.apache.hadoop.hbase.Coprocessor;
+
+public interface PhoenixCoprocessor extends Coprocessor {
+    default Optional<MetaDataEndpointObserver> getPhoenixObserver() {
+        return Optional.empty();
+    }
+}
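
Implementations opt in to metadata observation by overriding the default method; PhoenixAccessController above does exactly that. A hypothetical minimal implementor, for illustration only:

    // Hypothetical: a coprocessor that keeps the default getPhoenixObserver(),
    // so PhoenixMetaDataCoprocessorHost skips it when dispatching hooks.
    // start()/stop() are inherited as defaults from HBase's Coprocessor interface.
    public class NoOpPhoenixCoprocessor implements PhoenixCoprocessor {
    }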

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f85bf4cb/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java
new file mode 100644
index 0000000..21c010e
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.coprocessor;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+
+
+public class PhoenixMetaDataCoprocessorHost
+        extends CoprocessorHost<PhoenixCoprocessor, PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment> {
+    private RegionCoprocessorEnvironment env;
+    public static final String PHOENIX_META_DATA_COPROCESSOR_CONF_KEY =
+            "hbase.coprocessor.phoenix.classes";
+    public static final String DEFAULT_PHOENIX_META_DATA_COPROCESSOR_CONF_KEY =
+            "org.apache.phoenix.coprocessor.PhoenixAccessController";
+
+    public PhoenixMetaDataCoprocessorHost(RegionCoprocessorEnvironment env) throws IOException {
+        super(null);
+        this.env = env;
+        this.conf = new Configuration();
+        Iterator<Entry<String, String>> iterator = env.getConfiguration().iterator();
+        while (iterator.hasNext()) {
+            Entry<String, String> entry = iterator.next();
+            conf.set(entry.getKey(), entry.getValue());
+        }
+        boolean accessCheckEnabled = this.conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
+                QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
+        if (this.conf.get(PHOENIX_META_DATA_COPROCESSOR_CONF_KEY) == null && accessCheckEnabled) {
+            this.conf.set(PHOENIX_META_DATA_COPROCESSOR_CONF_KEY, DEFAULT_PHOENIX_META_DATA_COPROCESSOR_CONF_KEY);
+        }
+        loadSystemCoprocessors(conf, PHOENIX_META_DATA_COPROCESSOR_CONF_KEY);
+    }
+
+    private ObserverGetter<PhoenixCoprocessor, MetaDataEndpointObserver> phoenixObserverGetter =
+            PhoenixCoprocessor::getPhoenixObserver;
+
+    private abstract class PhoenixObserverOperation extends ObserverOperationWithoutResult<MetaDataEndpointObserver> {
+        public PhoenixObserverOperation() {
+            super(phoenixObserverGetter);
+        }
+
+        public PhoenixObserverOperation(User user) {
+            super(phoenixObserverGetter, user);
+        }
+
+        public PhoenixObserverOperation(User user, boolean bypassable) {
+            super(phoenixObserverGetter, user, bypassable);
+        }
+
+        void callObserver() throws IOException {
+            Optional<MetaDataEndpointObserver> observer = phoenixObserverGetter.apply(getEnvironment().getInstance());
+            if (observer.isPresent()) {
+                call(observer.get());
+            }
+        }
+
+        @Override
+        protected void postEnvCall() {}
+    }
+
+    private boolean execOperation(final PhoenixObserverOperation ctx) throws IOException {
+        if (ctx == null) return false;
+        boolean bypass = false;
+        for (PhoenixMetaDataControllerEnvironment env : coprocEnvironments) {
+            if (env.getInstance() instanceof MetaDataEndpointObserver) {
+                ctx.prepare(env);
+                Thread currentThread = Thread.currentThread();
+                ClassLoader cl = currentThread.getContextClassLoader();
+                try {
+                    currentThread.setContextClassLoader(env.getClassLoader());
+                    ctx.callObserver();
+                } catch (Throwable e) {
+                    handleCoprocessorThrowable(env, e);
+                } finally {
+                    currentThread.setContextClassLoader(cl);
+                }
+                bypass |= ctx.shouldBypass();
+                if (bypass) {
+                    break;
+                }
+            }
+            ctx.postEnvCall();
+        }
+        return bypass;
+    }
+    
+    @Override
+    protected void handleCoprocessorThrowable(final PhoenixMetaDataControllerEnvironment env, final Throwable e)
+            throws IOException {
+        if (e instanceof IOException) {
+            if (e.getCause() instanceof DoNotRetryIOException) { throw (IOException)e.getCause(); }
+        }
+        super.handleCoprocessorThrowable(env, e);
+    }
+
+    /**
+     * Encapsulation of the environment of each coprocessor
+     */
+    static class PhoenixMetaDataControllerEnvironment extends BaseEnvironment<PhoenixCoprocessor>
+            implements CoprocessorEnvironment<PhoenixCoprocessor> {
+
+        private RegionCoprocessorEnvironment env;
+
+        public PhoenixMetaDataControllerEnvironment(RegionCoprocessorEnvironment env, PhoenixCoprocessor instance,
+                int priority, int sequence, Configuration conf) {
+            super(instance, priority, sequence, conf);
+            this.env = env;
+        }
+
+        public RegionCoprocessorHost getCoprocessorHost() {
+            return ((HRegion)env.getRegion()).getCoprocessorHost();
+        }
+    }
+    
+
+    public void preGetTable(final String tenantId, final String tableName, final TableName physicalTableName)
+            throws IOException {
+        execOperation(new PhoenixObserverOperation() {
+            @Override
+            public void call(MetaDataEndpointObserver observer) throws IOException {
+                observer.preGetTable(this, tenantId, tableName, physicalTableName);
+            }
+        });
+    }
+
+    public void preCreateTable(final String tenantId, final String tableName, final TableName physicalTableName,
+            final TableName parentPhysicalTableName, final PTableType tableType, final Set<byte[]> familySet,
+            final Set<TableName> indexes) throws IOException {
+        execOperation(new PhoenixObserverOperation() {
+            @Override
+            public void call(MetaDataEndpointObserver observer) throws IOException {
+                observer.preCreateTable(this, tenantId, tableName, physicalTableName, parentPhysicalTableName,
+                        tableType, familySet, indexes);
+            }
+        });
+    }
+
+    public void preDropTable(final String tenantId, final String tableName, final TableName physicalTableName,
+            final TableName parentPhysicalTableName, final PTableType tableType, final List<PTable> indexes)
+            throws IOException {
+        execOperation(new PhoenixObserverOperation() {
+            @Override
+            public void call(MetaDataEndpointObserver observer) throws IOException {
+                observer.preDropTable(this, tenantId, tableName, physicalTableName, parentPhysicalTableName,
+                        tableType, indexes);
+            }
+        });
+    }
+
+    public void preAlterTable(final String tenantId, final String tableName, final TableName physicalTableName,
+            final TableName parentPhysicalTableName, final PTableType type) throws IOException {
+        execOperation(new PhoenixObserverOperation() {
+            @Override
+            public void call(MetaDataEndpointObserver observer) throws IOException {
+                observer.preAlterTable(this, tenantId, tableName, physicalTableName, parentPhysicalTableName, type);
+            }
+        });
+    }
+
+    public void preGetSchema(final String schemaName) throws IOException {
+        execOperation(new PhoenixObserverOperation() {
+            @Override
+            public void call(MetaDataEndpointObserver observer) throws IOException {
+                observer.preGetSchema(this, schemaName);
+            }
+        });
+    }
+
+    public void preCreateSchema(final String schemaName) throws IOException {
+        execOperation(new PhoenixObserverOperation() {
+            @Override
+            public void call(MetaDataEndpointObserver observer) throws IOException {
+                observer.preCreateSchema(this, schemaName);
+            }
+        });
+    }
+
+    public void preDropSchema(final String schemaName) throws IOException {
+        execOperation(new PhoenixObserverOperation() {
+            @Override
+            public void call(MetaDataEndpointObserver observer) throws IOException {
+                observer.preDropSchema(this, schemaName);
+            }
+        });
+    }
+
+    public void preIndexUpdate(final String tenantId, final String indexName, final TableName physicalTableName,
+            final TableName parentPhysicalTableName, final PIndexState newState) throws IOException {
+        execOperation(new PhoenixObserverOperation() {
+            @Override
+            public void call(MetaDataEndpointObserver observer) throws IOException {
+                observer.preIndexUpdate(this, tenantId, indexName, physicalTableName, parentPhysicalTableName,
+                        newState);
+            }
+        });
+    }
+
+
+    @Override
+    public PhoenixCoprocessor checkAndGetInstance(Class<?> implClass)
+            throws InstantiationException, IllegalAccessException {
+        if (PhoenixCoprocessor.class.isAssignableFrom(implClass)) {
+            return (PhoenixCoprocessor)implClass.newInstance();
+        }
+        return null;
+    }
+
+    @Override
+    public PhoenixMetaDataControllerEnvironment createEnvironment(PhoenixCoprocessor instance, int priority,
+            int sequence, Configuration conf) {
+        return new PhoenixMetaDataControllerEnvironment(env, instance, priority, sequence, conf);
+    }
+}
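
For orientation, a sketch of how a region-side caller could drive these hooks; it assumes 'env' is the RegionCoprocessorEnvironment of the SYSTEM.CATALOG region, and the schema name is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    public class MetaDataHookSketch {
        static void checkCreateSchema(RegionCoprocessorEnvironment env) throws IOException {
            PhoenixMetaDataCoprocessorHost host = new PhoenixMetaDataCoprocessorHost(env);
            // Dispatches to every registered observer; an AccessDeniedException
            // from PhoenixAccessController propagates back to the client.
            host.preCreateSchema("MY_SCHEMA");
        }
    }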

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f85bf4cb/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index 9a2981f..1849144 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.index;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.sql.SQLException;
 import java.util.Collection;
 import java.util.Collections;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
@@ -160,12 +162,12 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
     }
 
     private long handleFailureWithExceptions(Multimap<HTableInterfaceReference, Mutation> attempted,
-            Exception cause) throws Throwable {
+            final Exception cause) throws Throwable {
         Set<HTableInterfaceReference> refs = attempted.asMap().keySet();
-        Map<String, Long> indexTableNames = new HashMap<String, Long>(refs.size());
+        final Map<String, Long> indexTableNames = new HashMap<String, Long>(refs.size());
         // start by looking at all the tables to which we attempted to write
         long timestamp = 0;
-        boolean leaveIndexActive = blockDataTableWritesOnFailure || !disableIndexOnFailure;
+        final boolean leaveIndexActive = blockDataTableWritesOnFailure || !disableIndexOnFailure;
         // if using TrackingParallelWriter, we know which indexes failed and only disable those
         Set<HTableInterfaceReference> failedTables = cause instanceof MultiIndexWriteFailureException
                 ? new HashSet<HTableInterfaceReference>(((MultiIndexWriteFailureException)cause).getFailedTables())
@@ -209,55 +211,66 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
             return timestamp;
         }
 
-        PIndexState newState = disableIndexOnFailure ? PIndexState.DISABLE : PIndexState.PENDING_ACTIVE;
+        final PIndexState newState = disableIndexOnFailure ? PIndexState.DISABLE : PIndexState.PENDING_ACTIVE;
+        final long fTimestamp = timestamp;
         // for all the index tables that we've found, try to disable them and if that fails, try to
-        for (Map.Entry<String, Long> tableTimeElement :indexTableNames.entrySet()){
-            String indexTableName = tableTimeElement.getKey();
-            long minTimeStamp = tableTimeElement.getValue();
-            // We need a way of differentiating the block writes to data table case from
-            // the leave index active case. In either case, we need to know the time stamp
-            // at which writes started failing so we can rebuild from that point. If we
-            // keep the index active *and* have a positive INDEX_DISABLE_TIMESTAMP_BYTES,
-            // then writes to the data table will be blocked (this is client side logic
-            // and we can't change this in a minor release). So we use the sign of the
-            // time stamp to differentiate.
-            if (!disableIndexOnFailure && !blockDataTableWritesOnFailure) {
-                minTimeStamp *= -1;
-            }
-            // Disable the index by using the updateIndexState method of MetaDataProtocol end point coprocessor.
-            try (Table systemTable = env.getConnection().getTable(SchemaUtil
-                    .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()))) {
-                MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
-                        systemTable, newState);
-                if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
-                    LOG.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
-                    continue;
-                }
-                if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
-                    if (leaveIndexActive) {
-                        LOG.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
-                                + result.getMutationCode());
-                        // If we're not disabling the index, then we don't want to throw as throwing
-                        // will lead to the RS being shutdown.
-                        if (blockDataTableWritesOnFailure) {
-                            throw new DoNotRetryIOException("Attempt to update INDEX_DISABLE_TIMESTAMP failed.");
+        return User.runAsLoginUser(new PrivilegedExceptionAction<Long>() {
+            @Override
+            public Long run() throws Exception {
+                for (Map.Entry<String, Long> tableTimeElement : indexTableNames.entrySet()) {
+                    String indexTableName = tableTimeElement.getKey();
+                    long minTimeStamp = tableTimeElement.getValue();
+                    // We need a way of differentiating the block writes to data table case from
+                    // the leave index active case. In either case, we need to know the time stamp
+                    // at which writes started failing so we can rebuild from that point. If we
+                    // keep the index active *and* have a positive INDEX_DISABLE_TIMESTAMP_BYTES,
+                    // then writes to the data table will be blocked (this is client side logic
+                    // and we can't change this in a minor release). So we use the sign of the
+                    // time stamp to differentiate.
+                    if (!disableIndexOnFailure && !blockDataTableWritesOnFailure) {
+                        minTimeStamp *= -1;
+                    }
+                    // Disable the index by using the updateIndexState method of MetaDataProtocol end point coprocessor.
+                    try (Table systemTable = env.getConnection().getTable(SchemaUtil.getPhysicalTableName(
+                            PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()))) {
+                        MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
+                                systemTable, newState);
+                        if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
+                            LOG.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
+                            continue;
+                        }
+                        if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
+                            if (leaveIndexActive) {
+                                LOG.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
+                                        + result.getMutationCode());
+                                // If we're not disabling the index, then we don't want to throw as throwing
+                                // will lead to the RS being shutdown.
+                                if (blockDataTableWritesOnFailure) { throw new DoNotRetryIOException(
+                                        "Attempt to update INDEX_DISABLE_TIMESTAMP failed."); }
+                            } else {
+                                LOG.warn("Attempt to disable index " + indexTableName + " failed with code = "
+                                        + result.getMutationCode() + ". Will use default failure policy instead.");
+                                throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
+                            }
+                        }
+                        if (leaveIndexActive)
+                            LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
+                                    + " due to an exception while writing updates.", cause);
+                        else
+                            LOG.info("Successfully disabled index " + indexTableName
+                                    + " due to an exception while writing updates.", cause);
+                    } catch (Throwable t) {
+                        if (t instanceof Exception) {
+                            throw (Exception)t;
+                        } else {
+                            throw new Exception(t);
                         }
-                    } else {
-                        LOG.warn("Attempt to disable index " + indexTableName + " failed with code = "
-                                + result.getMutationCode() + ". Will use default failure policy instead.");
-                        throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
-                    }
+                    }
                 }
-                if (leaveIndexActive)
-                    LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
-                            + " due to an exception while writing updates.", cause);
-                else
-                    LOG.info("Successfully disabled index " + indexTableName
-                            + " due to an exception while writing updates.", cause);
+                // Return the cell time stamp (note they should all be the same)
+                return fTimestamp;
             }
-        }
-        // Return the cell time stamp (note they should all be the same)
-        return timestamp;
+        });
     }
 
     private Collection<? extends String> getLocalIndexNames(HTableInterfaceReference ref,
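
The heart of the change above is wrapping the index-state update in User.runAsLoginUser, so the write to SYSTEM.CATALOG runs with the server's login credentials rather than the RPC caller's. A stripped-down sketch of the pattern (the method body is illustrative):

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.hbase.security.User;

    public class RunAsLoginUserSketch {
        static long updateAsServer(final long timestamp) throws Exception {
            // Locals captured by the anonymous class must be (effectively) final,
            // which is why the patch introduces fTimestamp and finalizes cause and newState.
            return User.runAsLoginUser(new PrivilegedExceptionAction<Long>() {
                @Override
                public Long run() throws Exception {
                    // ... privileged metadata updates go here ...
                    return timestamp;
                }
            });
        }
    }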

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f85bf4cb/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index b129efc..84b3b83 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -262,6 +262,10 @@ public interface QueryServices extends SQLCloseable {
     
     // currently BASE64 and ASCII are supported
     public static final String UPLOAD_BINARY_DATA_TYPE_ENCODING = "phoenix.upload.binaryDataType.encoding";
+    // Toggle for Phoenix-level ACL checks (enforced by PhoenixAccessController)
+    public static final String PHOENIX_ACLS_ENABLED = "phoenix.acls.enabled";
+    public static final String PHOENIX_AUTOMATIC_GRANT_ENABLED = "phoenix.security.automatic.grant.enabled";
+    public static final String PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED = "phoenix.security.strict.mode.enabled";
 
     public static final String INDEX_ASYNC_BUILD_ENABLED = "phoenix.index.async.build.enabled";
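
The three new keys are ordinary boolean properties. A minimal sketch of how a
server-side component would read them, using the defaults added to
QueryServicesOptions below (the configuration source is assumed to be
hbase-site.xml via HBaseConfiguration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.phoenix.query.QueryServices;
    import org.apache.phoenix.query.QueryServicesOptions;

    public class SecurityFlagsSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Explicit settings win; otherwise the shipped defaults apply.
            boolean aclsEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
                    QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
            boolean strictMode = conf.getBoolean(
                    QueryServices.PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED,
                    QueryServicesOptions.DEFAULT_PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED);
            System.out.println("acls=" + aclsEnabled + " strict=" + strictMode);
        }
    }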
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f85bf4cb/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 638c36e..5ef8314 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -59,11 +59,14 @@ import static org.apache.phoenix.query.QueryServices.MAX_TENANT_MEMORY_PERC_ATTR
 import static org.apache.phoenix.query.QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK;
+import static org.apache.phoenix.query.QueryServices.PHOENIX_ACLS_ENABLED;
+import static org.apache.phoenix.query.QueryServices.PHOENIX_AUTOMATIC_GRANT_ENABLED;
 import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_CLUSTER_BASE_PATH;
 import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED;
 import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_SERVICE_NAME;
 import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_ZK_ACL_PASSWORD;
 import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_ZK_ACL_USERNAME;
+import static org.apache.phoenix.query.QueryServices.PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED;
 import static org.apache.phoenix.query.QueryServices.QUEUE_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.REGIONSERVER_INFO_PORT_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.RENEW_LEASE_ENABLED;
@@ -319,6 +322,11 @@ public class QueryServicesOptions {
     public static final int DEFAULT_CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS = 0;
     public static final boolean DEFAULT_STATS_COLLECTION_ENABLED = true;
     public static final boolean DEFAULT_USE_STATS_FOR_PARALLELIZATION = true;
+
+    // Security defaults
+    public static final boolean DEFAULT_PHOENIX_ACLS_ENABLED = false;
+    public static final boolean DEFAULT_PHOENIX_AUTOMATIC_GRANT_ENABLED = false;
+    public static final boolean DEFAULT_PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED = true;
 
     //default update cache frequency
     public static final int DEFAULT_UPDATE_CACHE_FREQUENCY = 0;
@@ -420,6 +428,10 @@
             .setIfUnset(TRACING_THREAD_POOL_SIZE, DEFAULT_TRACING_THREAD_POOL_SIZE)
             .setIfUnset(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED)
             .setIfUnset(USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION)
+            .setIfUnset(UPLOAD_BINARY_DATA_TYPE_ENCODING, DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING)
+            .setIfUnset(PHOENIX_ACLS_ENABLED, DEFAULT_PHOENIX_ACLS_ENABLED)
+            .setIfUnset(PHOENIX_AUTOMATIC_GRANT_ENABLED, DEFAULT_PHOENIX_AUTOMATIC_GRANT_ENABLED)
+            .setIfUnset(PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED, DEFAULT_PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED)
             .setIfUnset(COST_BASED_OPTIMIZER_ENABLED, DEFAULT_COST_BASED_OPTIMIZER_ENABLED);
         // HBase sets this to 1, so we reset it to something more appropriate.
         // Hopefully HBase will change this, because we can't know if a user set
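
The setIfUnset chain applies a default only when the key is absent, so any
value configured by the user is left untouched. An illustrative equivalent on
a raw Configuration (not Phoenix's actual implementation):

    import org.apache.hadoop.conf.Configuration;

    public class SetIfUnsetSketch {
        // Write the default only when the key has no value yet; an explicit
        // user setting is never overwritten.
        static void setIfUnset(Configuration conf, String key, boolean defaultValue) {
            if (conf.get(key) == null) {
                conf.setBoolean(key, defaultValue);
            }
        }
    }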

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f85bf4cb/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index 459a3a9..f4d5e63 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -25,6 +25,7 @@ import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.EOFException;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.sql.Date;
 import java.util.ArrayList;
 import java.util.List;
@@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Mut
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
 import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -207,23 +209,31 @@ public class StatisticsWriter implements Closeable {
         }
     }
 
-    public void commitStats(List<Mutation> mutations, StatisticsCollector statsCollector) throws IOException {
-        commitLastStatsUpdatedTime(statsCollector);
-        if (mutations.size() > 0) {
-            byte[] row = mutations.get(0).getRow();
-            MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
-            for (Mutation m : mutations) {
-                mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(getMutationType(m), m));
-            }
-            MutateRowsRequest mrm = mrmBuilder.build();
-            CoprocessorRpcChannel channel = statsWriterTable.coprocessorService(row);
-            MultiRowMutationService.BlockingInterface service = MultiRowMutationService.newBlockingStub(channel);
-            try {
-                service.mutateRows(null, mrm);
-            } catch (ServiceException ex) {
-                ProtobufUtil.toIOException(ex);
+    public void commitStats(final List<Mutation> mutations, final StatisticsCollector statsCollector)
+            throws IOException {
+        User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+                commitLastStatsUpdatedTime(statsCollector);
+                if (mutations.size() > 0) {
+                    byte[] row = mutations.get(0).getRow();
+                    MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
+                    for (Mutation m : mutations) {
+                        mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(getMutationType(m), m));
+                    }
+                    MutateRowsRequest mrm = mrmBuilder.build();
+                    CoprocessorRpcChannel channel = statsWriterTable.coprocessorService(row);
+                    MultiRowMutationService.BlockingInterface service = MultiRowMutationService
+                            .newBlockingStub(channel);
+                    try {
+                        service.mutateRows(null, mrm);
+                    } catch (ServiceException ex) {
+                        throw ProtobufUtil.toIOException(ex);
+                    }
+                }
+                return null;
             }
-        }
+        });
     }
 
     private Put getLastStatsUpdatedTimePut(long timeStamp) {
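
commitStats keeps its atomic-commit shape: every stats mutation is funneled
through the region's MultiRowMutationService endpoint in one request, now
executed as the login user, and the translated ServiceException is rethrown
rather than dropped. A hedged, generalized sketch of that endpoint call (class
and method names are illustrative; toIOException follows the patch's usage):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
    import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
    import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
    import com.google.protobuf.ServiceException;

    public class AtomicMultiRowCommitSketch {
        static void commitAtomically(Table table, List<Mutation> mutations) throws IOException {
            if (mutations.isEmpty()) { return; }
            MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
            for (Mutation m : mutations) {
                MutationType type = (m instanceof Put) ? MutationType.PUT : MutationType.DELETE;
                builder.addMutationRequest(ProtobufUtil.toMutation(type, m));
            }
            // Route the endpoint call through the region owning the first row.
            CoprocessorRpcChannel channel = table.coprocessorService(mutations.get(0).getRow());
            MultiRowMutationService.BlockingInterface service =
                    MultiRowMutationService.newBlockingStub(channel);
            try {
                service.mutateRows(null, builder.build());
            } catch (ServiceException ex) {
                // Rethrow the translated exception; dropping it would silently
                // lose a failed commit.
                throw ProtobufUtil.toIOException(ex);
            }
        }
    }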

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f85bf4cb/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index b71249e..e31d9ab 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -58,6 +58,7 @@ import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTable.LinkType;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SequenceKey;
@@ -223,6 +224,16 @@ public class MetaDataUtil {
         }
         return null;
     }
+
+    public static boolean isNameSpaceMapped(List<Mutation> tableMetaData, KeyValueBuilder builder,
+            ImmutableBytesWritable value) {
+        if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData),
+            PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES, builder, value)) {
+            return (boolean) PBoolean.INSTANCE.toObject(ByteUtil.copyKeyBytesIfNecessary(value));
+        }
+        return false;
+    }
+
     
     public static long getParentSequenceNumber(List<Mutation> tableMetaData) {
         return getSequenceNumber(getParentTableHeaderRow(tableMetaData));
@@ -662,4 +673,11 @@ public class MetaDataUtil {
         byte[] physicalTableName = Bytes.toBytes(SchemaUtil.getTableNameFromFullName(view.getPhysicalName().getString()));
         return SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, physicalTableSchemaName, physicalTableName);
     }
+
+    public static IndexType getIndexType(List<Mutation> tableMetaData, KeyValueBuilder builder,
+            ImmutableBytesWritable value) {
+        if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), PhoenixDatabaseMetaData.INDEX_TYPE_BYTES, builder,
+                value)) { return IndexType.fromSerializedValue(value.get()[value.getOffset()]); }
+        return null;
+    }
 }
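
Both new helpers read only the header row of the DDL's metadata mutations. A
hypothetical call site (the caller, e.g. a coprocessor hook, is assumed to
already hold the mutation list and a KeyValueBuilder):

    import java.util.List;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
    import org.apache.phoenix.schema.PTable.IndexType;
    import org.apache.phoenix.util.MetaDataUtil;

    public class HeaderRowSketch {
        // Returns null when the header row has no INDEX_TYPE column, i.e. the
        // DDL is not creating an index.
        static IndexType indexTypeOf(List<Mutation> tableMetaData, KeyValueBuilder builder) {
            ImmutableBytesWritable value = new ImmutableBytesWritable();
            return MetaDataUtil.getIndexType(tableMetaData, builder, value);
        }
    }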

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f85bf4cb/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index a73467a..62f453e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -20,9 +20,11 @@ package org.apache.phoenix.util;
 import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkNotNull;
 import static com.google.common.base.Strings.isNullOrEmpty;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES;
 
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
@@ -42,8 +44,11 @@ import java.util.TreeSet;
 import javax.annotation.Nullable;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -76,6 +81,7 @@ import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableProperty;
 import org.apache.phoenix.schema.ValueSchema.Field;
+import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
@@ -1136,4 +1142,11 @@ public class SchemaUtil {
         }
         return false;
     }
+
+    public static boolean isNamespaceMapped(Result currentResult) {
+        Cell isNamespaceMappedCell = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, IS_NAMESPACE_MAPPED_BYTES);
+        return isNamespaceMappedCell != null && (boolean) PBoolean.INSTANCE.toObject(CellUtil.cloneValue(isNamespaceMappedCell));
+    }
+
+
 }
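
SchemaUtil.isNamespaceMapped decodes the latest IS_NAMESPACE_MAPPED cell from a
scan Result and treats an absent cell as false. A hedged usage sketch (the row
key is assumed to be the table's SYSTEM.CATALOG header-row key):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.phoenix.util.SchemaUtil;

    public class NamespaceMappedSketch {
        static boolean isNamespaceMapped(Table sysCatalog, byte[] headerRowKey) throws IOException {
            Result current = sysCatalog.get(new Get(headerRowKey));
            // Rows written before namespace mapping existed have no cell and
            // therefore report false.
            return SchemaUtil.isNamespaceMapped(current);
        }
    }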

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f85bf4cb/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 780d420..23604f2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -242,7 +242,9 @@ public class ServerUtil {
         if (rowLock == null) {
             throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
         }
-        locks.add(rowLock);
+        if (locks != null) {
+            locks.add(rowLock);
+        }
         return rowLock;
     }
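
The null-check makes the lock list optional: callers that release locks in bulk
pass a collection, while a caller that manages the lock itself can now pass
null. A sketch of both shapes (acquireLock stands in for the ServerUtil method
above, whose full name is not shown in this hunk):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.regionserver.Region.RowLock;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowLockSketch {
        static RowLock acquireLock(Region region, byte[] key, List<RowLock> locks)
                throws IOException {
            RowLock rowLock = region.getRowLock(key, false);
            if (rowLock == null) {
                throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
            }
            if (locks != null) {
                locks.add(rowLock); // tracked: released in bulk by the caller
            }
            return rowLock;
        }

        static void untrackedUsage(Region region, byte[] key) throws IOException {
            RowLock lock = acquireLock(region, key, null);
            try {
                // ... critical section ...
            } finally {
                lock.release();
            }
        }
    }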
 
