This is an automated email from the ASF dual-hosted git repository.

linaataustin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/sentry.git


The following commit(s) were added to refs/heads/master by this push:
     new 0067d47  SENTRY-2483: Implement HMS PreReadEvent support in MetastoreAuthzBinding (Na Li, reviewed by Arjun Mishra, Kalyan Kumar Kalvagadda)
0067d47 is described below

commit 0067d477ff83970246f81ed5badd738e9d0f8319
Author: lina.li <lina...@cloudera.com>
AuthorDate: Mon Jan 28 16:05:23 2019 -0600

    SENTRY-2483: Implement HMS PreReadEvent support in MetastoreAuthzBinding (Na Li, reviewed by Arjun Mishra, Kalyan Kumar Kalvagadda)
---
 .../sentry/binding/hive/conf/HiveAuthzConf.java    |  2 +
 .../metastore/MetastoreAuthzBindingBase.java       | 48 ++++++++++--
 sentry-tests/sentry-tests-hive/pom.xml             |  4 +
 .../hive/AbstractTestWithStaticConfiguration.java  | 16 +++-
 .../e2e/hive/hiveserver/HiveServerFactory.java     |  2 -
 ...stractMetastoreTestWithStaticConfiguration.java |  1 -
 .../e2e/metastore/TestAuthorizingObjectStore.java  | 45 +++++++++++-
 .../tests/e2e/metastore/TestMetastoreEndToEnd.java | 85 ++++++++++++++++++++++
 8 files changed, 192 insertions(+), 11 deletions(-)

diff --git a/sentry-binding/sentry-binding-hive-conf/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java b/sentry-binding/sentry-binding-hive-conf/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java
index 9efd612..5c43329 100644
--- a/sentry-binding/sentry-binding-hive-conf/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java
+++ b/sentry-binding/sentry-binding-hive-conf/src/main/java/org/apache/sentry/binding/hive/conf/HiveAuthzConf.java
@@ -109,6 +109,8 @@ public class HiveAuthzConf extends Configuration {
         AUTHZ_SYNC_ALTER_WITH_POLICY_STORE("sentry.hive.sync.alter", "true"),
        AUTHZ_SYNC_CREATE_WITH_POLICY_STORE("sentry.hive.sync.create", "false"),
         AUTHZ_SYNC_DROP_WITH_POLICY_STORE("sentry.hive.sync.drop", "true"),
+        // Specifies whether authorization of metadata reads is enabled at the HMS server
+        AUTHZ_METASTORE_READ_AUTHORIZATION_ENABLED("sentry.metastore.read.authorization.enabled", "false"),
 
        AUTHZ_PROVIDER_DEPRECATED("hive.sentry.provider",
                "org.apache.sentry.provider.file.ResourceAuthorizationProvider"),
diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/MetastoreAuthzBindingBase.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/MetastoreAuthzBindingBase.java
index 328d2b5..cdb6de4 100644
--- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/MetastoreAuthzBindingBase.java
+++ b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/MetastoreAuthzBindingBase.java
@@ -19,6 +19,7 @@ package org.apache.sentry.binding.metastore;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
 import org.apache.commons.lang.StringUtils;
@@ -38,6 +39,8 @@ import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreEventContext;
+import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.sentry.binding.hive.authz.HiveAuthzBinding;
@@ -63,10 +66,10 @@ import java.util.Set;
 /**
  * Sentry binding for Hive Metastore. The binding is integrated into Metastore
  * via the pre-event listener which are fired prior to executing the metadata
- * action. This point we are only authorizing metadata writes since the listners
- * are not fired from read events. Each action builds a input and output
- * hierarchy as per the objects used in the given operations. This is then
- * passed down to the hive binding which handles the authorization. This ensures
+ * action. At this point we always authorize metadata writes. Authorizing metadata reads can be
+ * enabled or disabled to maintain backwards compatibility.
+ * Each action builds an input and output hierarchy as per the objects used in the given operations.
+ * This is then passed down to the hive binding which handles the authorization. This ensures
  * that we follow the same privilege model and policies.
  */
 public abstract class MetastoreAuthzBindingBase extends MetaStorePreEventListener {
@@ -75,6 +78,7 @@ public abstract class MetastoreAuthzBindingBase extends MetaStorePreEventListene
    * Build the set of object hierarchies ie fully qualified db model objects
    */
   protected static class HierarcyBuilder {
+    public static final Set EMPTY_HIERARCHY = Collections.emptySet();
     private Set<List<DBModelAuthorizable>> authHierarchy;
 
     public HierarcyBuilder() {
@@ -135,6 +139,7 @@ public abstract class MetastoreAuthzBindingBase extends MetaStorePreEventListene
   private HiveAuthzBinding hiveAuthzBinding;
   private final String warehouseDir;
   protected static boolean sentryCacheOutOfSync = false;
+  protected final boolean readAuthorizationEnabled;
 
   public MetastoreAuthzBindingBase(Configuration config) throws Exception {
     super(config);
@@ -155,9 +160,11 @@ public abstract class MetastoreAuthzBindingBase extends MetaStorePreEventListene
     hiveConf = new HiveConf(config, this.getClass());
     this.authServer = new Server(authzConf.get(AuthzConfVars.AUTHZ_SERVER_NAME
         .getVar()));
-    serviceUsers = ImmutableSet.copyOf(toTrimedLower(Sets.newHashSet(authzConf
+    serviceUsers = ImmutableSet.copyOf(Sets.newHashSet(authzConf
         .getStrings(AuthzConfVars.AUTHZ_METASTORE_SERVICE_USERS.getVar(),
-            new String[] { "" }))));
+            new String[] { "" })));
+    readAuthorizationEnabled = authzConf.getBoolean(
+        AuthzConfVars.AUTHZ_METASTORE_READ_AUTHORIZATION_ENABLED.getVar(), false);
     warehouseDir = hiveConf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
 
   }
@@ -200,6 +207,16 @@ public abstract class MetastoreAuthzBindingBase extends MetaStorePreEventListene
     case LOAD_PARTITION_DONE:
       // noop for now
       break;
+    case READ_DATABASE:
+      if (readAuthorizationEnabled) {
+        authorizeReadDatabase((PreReadDatabaseEvent) context);
+      }
+      break;
+    case READ_TABLE:
+      if (readAuthorizationEnabled) {
+        authorizeReadTable((PreReadTableEvent) context);
+      }
+      break;
     default:
       break;
     }
@@ -389,6 +406,25 @@ public abstract class MetastoreAuthzBindingBase extends MetaStorePreEventListene
         inputBuilder.build(), outputBuilder.build());
   }
 
+  private void authorizeReadDatabase(PreReadDatabaseEvent context)
+    throws InvalidOperationException {
+    String dbName = context.getDatabase().getName();
+
+    authorizeMetastoreAccess(HiveOperation.DESCDATABASE,
+      new HierarcyBuilder().addDbToOutput(getAuthServer(), dbName).build(),
+      HierarcyBuilder.EMPTY_HIERARCHY);
+  }
+
+  private void authorizeReadTable(PreReadTableEvent context)
+    throws InvalidOperationException {
+    String dbName = context.getTable().getDbName();
+    String tableName = context.getTable().getTableName();
+
+    authorizeMetastoreAccess(HiveOperation.DESCTABLE,
+      new HierarcyBuilder().addTableToOutput(getAuthServer(), dbName, tableName).build(),
+      HierarcyBuilder.EMPTY_HIERARCHY);
+  }
+
   protected InvalidOperationException invalidOperationException(Exception e) {
     InvalidOperationException ex = new InvalidOperationException(e.getMessage());
     ex.initCause(e.getCause());
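
To summarize the dispatch added above, here is a stripped-down, illustrative pre-event listener. The class name, comments, and literal property key are invented for the sketch; it is not the Sentry binding itself, which delegates to HiveAuthzBinding for the actual privilege check. It shows the pattern the patch follows: READ_DATABASE and READ_TABLE events are authorized only when the flag is on, while other event types keep their existing handling.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStorePreEventListener;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.events.PreEventContext;
import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;

public class ReadAuthzListenerSketch extends MetaStorePreEventListener {
  private final boolean readAuthorizationEnabled;

  public ReadAuthzListenerSketch(Configuration config) {
    super(config);
    // Same default as the patch: reads are not authorized unless explicitly enabled.
    this.readAuthorizationEnabled =
        config.getBoolean("sentry.metastore.read.authorization.enabled", false);
  }

  @Override
  public void onEvent(PreEventContext context)
      throws MetaException, NoSuchObjectException, InvalidOperationException {
    switch (context.getEventType()) {
      case READ_DATABASE:
        if (readAuthorizationEnabled) {
          String dbName = ((PreReadDatabaseEvent) context).getDatabase().getName();
          // The real binding authorizes DESCDATABASE on dbName here.
        }
        break;
      case READ_TABLE:
        if (readAuthorizationEnabled) {
          PreReadTableEvent event = (PreReadTableEvent) context;
          String dbName = event.getTable().getDbName();
          String tableName = event.getTable().getTableName();
          // The real binding authorizes DESCTABLE on dbName.tableName here.
        }
        break;
      default:
        // Write events are always authorized by the binding; omitted in this sketch.
        break;
    }
  }
}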
diff --git a/sentry-tests/sentry-tests-hive/pom.xml b/sentry-tests/sentry-tests-hive/pom.xml
index 74777bb..54266e6 100644
--- a/sentry-tests/sentry-tests-hive/pom.xml
+++ b/sentry-tests/sentry-tests-hive/pom.xml
@@ -40,6 +40,10 @@ limitations under the License.
          API to remove the prefix instead of adding the ant dependency
     -->
     <dependency>
+      <groupId>org.assertj</groupId>
+      <artifactId>assertj-core</artifactId>
+    </dependency>
+    <dependency>
       <groupId>org.apache.ant</groupId>
       <artifactId>ant</artifactId>
       <version>1.9.1</version>
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java
index 8bf486e..d63957a 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java
@@ -135,6 +135,8 @@ public abstract class AbstractTestWithStaticConfiguration extends RulesForE2ETes
   protected static boolean setMetastoreListener = true;
   protected static String testServerType = null;
   protected static boolean enableHiveConcurrency = false;
+  protected static boolean enableAuthorizingObjectStore = true;
+  protected static boolean enableAuthorizeReadMetaData = false;
  // indicate if the database need to be clear for every test case in one test class
   protected static boolean clearDbPerTest = true;
 
@@ -280,6 +282,19 @@ public abstract class AbstractTestWithStaticConfiguration extends RulesForE2ETes
       policyURI = policyFileLocation.getPath();
     }
 
+    if (enableAuthorizingObjectStore) {
+      properties.put(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname,
+        "org.apache.sentry.binding.metastore.AuthorizingObjectStore");
+    } else {
+      properties.put(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname, ConfVars.METASTORE_RAW_STORE_IMPL.defaultStrVal);
+    }
+
+    if (enableAuthorizeReadMetaData) {
+      properties.put("sentry.metastore.read.authorization.enabled", "true");
+    } else {
+      properties.put("sentry.metastore.read.authorization.enabled", "false");
+    }
+
     if (enableHiveConcurrency) {
       properties.put(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "true");
       properties.put(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname,
@@ -476,7 +491,6 @@ public abstract class AbstractTestWithStaticConfiguration extends RulesForE2ETes
   private static void setupSentryService() throws Exception {
 
     sentryConf = new Configuration(true);
-
     properties.put(HiveServerFactory.AUTHZ_PROVIDER_BACKEND,
         SimpleDBProviderBackend.class.getName());
     properties.put(ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY.varname,
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java
index 7d41348..c8d0177 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/hiveserver/HiveServerFactory.java
@@ -163,8 +163,6 @@ public class HiveServerFactory {
       hadoopPath.toFile().setExecutable(true);
     }
 
-    properties.put(METASTORE_RAW_STORE_IMPL,
-        "org.apache.sentry.binding.metastore.AuthorizingObjectStore");
     if (HiveServer2Type.InternalMetastore.equals(type)) {
       // The configuration sentry.metastore.service.users is for the user who
       // has all access to get the metadata.
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
index f14cbb6..f1600c5 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
@@ -247,7 +247,6 @@ public abstract class AbstractMetastoreTestWithStaticConfiguration extends
     });
   }
 
-
   public void execHiveSQL(String sqlStmt, String userName) throws Exception {
     execHiveSQLwithOverlay(sqlStmt, userName, new HashMap<String, String>());
   }
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestAuthorizingObjectStore.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestAuthorizingObjectStore.java
index 3c28fd0..a6d1505 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestAuthorizingObjectStore.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestAuthorizingObjectStore.java
@@ -57,6 +57,7 @@ public class TestAuthorizingObjectStore extends
 
   @BeforeClass
   public static void setupTestStaticConfiguration () throws Exception {
+    enableAuthorizeReadMetaData = true;
     AbstractMetastoreTestWithStaticConfiguration.setupTestStaticConfiguration();
   }
 
@@ -441,7 +442,17 @@ public class TestAuthorizingObjectStore extends
   @Test
   public void testPrivilegesForPartialAccess() throws Exception {
     HiveMetaStoreClient client = context.getMetaStoreClient(USER2_1);
-    assertThat(client.getDatabase(dbName1), notNullValue());
+
+    try {
+      // user only has select on dbName1.tabName1
+      client.getDatabase(dbName1);
+      fail("NoSuchObjectException should have been thrown");
+    } catch (NoSuchObjectException noe) {
+      // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
+    }
+
     try {
       client.getDatabase(dbName2);
       fail("NoSuchObjectException should have been thrown");
@@ -458,18 +469,26 @@ public class TestAuthorizingObjectStore extends
       fail("NoSuchObjectException should have been thrown");
     } catch (NoSuchObjectException noe) {
       // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
     }
+
     try {
       client.getTable(dbName2, tabName3);
       fail("NoSuchObjectException should have been thrown");
     } catch (NoSuchObjectException noe) {
       // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
     }
+
     try {
       client.getTable(dbName2, tabName4);
       fail("NoSuchObjectException should have been thrown");
     } catch (NoSuchObjectException noe) {
       // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
     }
 
     assertThat(client.listPartitions(dbName1, tabName1, (short) 1).size(), equalTo(1));
@@ -478,18 +497,26 @@ public class TestAuthorizingObjectStore extends
       fail("MetaException should have been thrown");
     } catch (NoSuchObjectException noe) {
       // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
     }
+
     try {
       client.listPartitions(dbName2, tabName3, (short) 1);
       fail("MetaException should have been thrown");
     } catch (NoSuchObjectException noe) {
       // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
     }
+
     try {
       client.listPartitions(dbName2, tabName4, (short) 1);
       fail("MetaException should have been thrown");
     } catch (NoSuchObjectException noe) {
       // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
     }
 
     assertThat(
@@ -501,20 +528,28 @@ public class TestAuthorizingObjectStore extends
       fail("MetaException should have been thrown");
     } catch (NoSuchObjectException noe) {
       // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
     }
+
     try {
       client.listPartitions(dbName2, tabName3, new ArrayList<String>(Arrays.asList(partitionVal)),
           (short) 1);
       fail("MetaException should have been thrown");
     } catch (NoSuchObjectException noe) {
       // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
     }
+
     try {
       client.listPartitions(dbName2, tabName4, new ArrayList<String>(Arrays.asList(partitionVal)),
           (short) 1);
       fail("MetaException should have been thrown");
     } catch (NoSuchObjectException noe) {
       // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
     }
 
     assertThat(
@@ -525,18 +560,26 @@ public class TestAuthorizingObjectStore extends
       fail("NoSuchObjectException should have been thrown");
     } catch (NoSuchObjectException noe) {
       // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
     }
+
     try {
       client.getPartition(dbName2, tabName3, new ArrayList<String>(Arrays.asList(partitionVal)));
       fail("NoSuchObjectException should have been thrown");
     } catch (NoSuchObjectException noe) {
       // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
     }
+
     try {
       client.getPartition(dbName2, tabName4, new ArrayList<String>(Arrays.asList(partitionVal)));
       fail("NoSuchObjectException should have been thrown");
     } catch (NoSuchObjectException noe) {
       // ignore, just make sure the authorization is failed.
+    } catch (MetaException ex) {
+      // ignore, just make sure the authorization is failed.
     }
 
     assertThat(client.getTables(dbName1, "tab*").size(), equalTo(1));
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
index f8f304f..6327f16 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
@@ -18,6 +18,7 @@
 
 package org.apache.sentry.tests.e2e.metastore;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -27,6 +28,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Map;
 
+import org.apache.hadoop.hive.metastore.api.Database;
 import org.junit.Assert;
 
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -67,6 +69,8 @@ public class  TestMetastoreEndToEnd extends
   @BeforeClass
   public static void setupTestStaticConfiguration() throws Exception {
     setMetastoreListener = false;
+    enableAuthorizingObjectStore = false;
+    enableAuthorizeReadMetaData = true;
     AbstractMetastoreTestWithStaticConfiguration.setupTestStaticConfiguration();
   }
 
@@ -638,4 +642,85 @@ public class  TestMetastoreEndToEnd extends
     Assert.assertNotNull(newPartition);
     client.close();
   }
+
+  @Test
+  public void testReadDatabase() throws Exception {
+    Database db;
+    HiveMetaStoreClient client;
+
+    // Create a database and verify the admin can read its metadata
+    client = context.getMetaStoreClient(ADMIN1);
+    dropMetastoreDBIfExists(client, dbName);
+    createMetastoreDB(client, dbName);
+    dropMetastoreDBIfExists(client, "db2");
+    createMetastoreDB(client, "db2");
+    db = client.getDatabase(dbName);
+    assertThat(db).isNotNull();
+    assertThat(db.getName()).isEqualTo(dbName);
+    db = client.getDatabase("db2");
+    assertThat(db).isNotNull();
+    assertThat(db.getName()).isEqualTo("db2");
+    client.close();
+
+    // Verify a user with ALL privileges can read the database metadata
+    client = context.getMetaStoreClient(USER1_1);
+    db = client.getDatabase(dbName);
+    assertThat(db).isNotNull();
+    assertThat(db.getName()).isEqualTo(dbName);
+    client.close();
+
+    // Verify a user without privileges cannot read the database metadata
+    client = context.getMetaStoreClient(USER1_1);
+    try {
+      client.getDatabase("db2");
+      fail("MetaException exepected because USER1_1 does not have privileges 
on 'db2'");
+    } catch (Exception e) {
+      assertThat(e).isInstanceOf(MetaException.class)
+        .hasMessageContaining("does not have privileges for DESCDATABASE");
+    }
+    client.close();
+  }
+
+  @Test
+  public void testReadTable() throws Exception {
+    Table tbl;
+    HiveMetaStoreClient client;
+
+    // Create a database and verify the admin can read its metadata
+    client = context.getMetaStoreClient(ADMIN1);
+    dropMetastoreDBIfExists(client, dbName);
+    createMetastoreDB(client, dbName);
+    createMetastoreTable(client, dbName, tabName1,
+      Lists.newArrayList(new FieldSchema("col1", "int", "")));
+    createMetastoreTable(client, dbName, "tbl1",
+      Lists.newArrayList(new FieldSchema("col1", "int", "")));
+    tbl = client.getTable(dbName, tabName1);
+    assertThat(tbl).isNotNull();
+    assertThat(tbl.getDbName()).isEqualTo(dbName);
+    assertThat(tbl.getTableName()).isEqualTo(tabName1);
+    tbl = client.getTable(dbName, "tbl1");
+    assertThat(tbl).isNotNull();
+    assertThat(tbl.getDbName()).isEqualTo(dbName);
+    assertThat(tbl.getTableName()).isEqualTo("tbl1");
+    client.close();
+
+    // Verify a user with ALL privileges can read the table metadata
+    client = context.getMetaStoreClient(USER3_1);
+    tbl = client.getTable(dbName, tabName1);
+    assertThat(tbl).isNotNull();
+    assertThat(tbl.getDbName()).isEqualTo(dbName);
+    assertThat(tbl.getTableName()).isEqualTo(tabName1);
+    client.close();
+
+    // Verify a user without privileges cannot read the table metadata
+    client = context.getMetaStoreClient(USER3_1);
+    try {
+      client.getTable(dbName, "tbl1");
+      fail("MetaException exepected because USER3_1 does not have privileges 
on 'tbl1'");
+    } catch (Exception e) {
+      assertThat(e).isInstanceOf(MetaException.class)
+        .hasMessageContaining("does not have privileges for DESCTABLE");
+    }
+    client.close();
+  }
 }
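
As a usage-level sketch of the end result (illustrative only; it assumes a classpath hive-site.xml pointing at an HMS that runs the Sentry binding with read authorization enabled, and a caller without privileges on a database named db2, mirroring the e2e tests above):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class ReadAuthzClientSketch {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // With read authorization enabled, even a metadata read is authorized as DESCDATABASE.
      client.getDatabase("db2");
      System.out.println("read allowed");
    } catch (MetaException e) {
      // Expected for an unprivileged caller; the message names the denied operation,
      // e.g. "... does not have privileges for DESCDATABASE".
      System.out.println("read denied: " + e.getMessage());
    } finally {
      client.close();
    }
  }
}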
