This is an automated email from the ASF dual-hosted git repository.

dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new c004341  HIVE-22750: Consolidate LockType naming (Marton Bod via Denys Kuzmenko)
c004341 is described below

commit c004341241a4a9a6f9f985d36dc368073f95d520
Author: Marton Bod <m...@cloudera.com>
AuthorDate: Thu Apr 16 11:17:33 2020 +0200

    HIVE-22750: Consolidate LockType naming (Marton Bod via Denys Kuzmenko)
---
 .../hive/hcatalog/streaming/HiveEndPoint.java      |  2 +-
 .../streaming/mutate/client/lock/Lock.java         |  4 +-
 .../upgrade/hive/hive-schema-4.0.0.hive.sql        |  2 +-
 .../upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql   |  2 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java    | 18 ++---
 .../hadoop/hive/metastore/txn/TestTxnHandler.java  |  3 +-
 .../hadoop/hive/ql/lockmgr/TestDbTxnManager2.java  |  4 +-
 .../apache/hadoop/hive/metastore/api/LockType.java |  5 +-
 .../src/gen/thrift/gen-php/metastore/Types.php     |  2 +
 .../src/gen/thrift/gen-py/hive_metastore/ttypes.py |  3 +
 .../src/gen/thrift/gen-rb/hive_metastore_types.rb  |  5 +-
 .../hive/metastore/LockComponentBuilder.java       | 17 ++++-
 .../src/main/thrift/hive_metastore.thrift          |  1 +
 .../hadoop/hive/metastore/txn/TxnHandler.java      | 89 ++++++++--------------
 .../apache/hadoop/hive/metastore/txn/TxnUtils.java | 18 ++---
 .../hadoop/hive/metastore/utils/LockTypeUtil.java  | 36 +++++++++
 .../hive/metastore/TestHiveMetaStoreTxns.java      | 10 +--
 .../hive/metastore/utils/LockTypeUtilTest.java     | 30 ++++++++
 .../apache/hive/streaming/TransactionBatch.java    |  2 +-
 19 files changed, 158 insertions(+), 95 deletions(-)

diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index e249b77..dc8c663 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -1014,7 +1014,7 @@ public class HiveEndPoint {
       LockComponentBuilder lockCompBuilder = new LockComponentBuilder()
               .setDbName(hiveEndPoint.database)
               .setTableName(hiveEndPoint.table)
-              .setShared()
+              .setSharedRead()
               .setOperationType(DataOperationType.INSERT);
       if (partNameForLock!=null && !partNameForLock.isEmpty() ) {
           lockCompBuilder.setPartitionName(partNameForLock);
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
index 52eb613..88970da 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
@@ -184,9 +184,9 @@ public class Lock {
      //todo: DataOperationType is set conservatively here, we'd really want to distinguish update/delete
       //and insert/select and if resource (that is written to) is ACID or not
       if (sinks.contains(table)) {
-        componentBuilder.setSemiShared().setOperationType(DataOperationType.UPDATE).setIsTransactional(true);
+        componentBuilder.setSharedWrite().setOperationType(DataOperationType.UPDATE).setIsTransactional(true);
       } else {
-        componentBuilder.setShared().setOperationType(DataOperationType.INSERT).setIsTransactional(true);
+        componentBuilder.setSharedRead().setOperationType(DataOperationType.INSERT).setIsTransactional(true);
       }
       LockComponent component = componentBuilder.build();
       requestBuilder.addLockComponent(component);
diff --git a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
index 03540bb..d857410 100644
--- a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
@@ -1418,7 +1418,7 @@ SELECT DISTINCT
     HL.`HL_TABLE`,
     HL.`HL_PARTITION`,
    CASE WHEN HL.`HL_LOCK_STATE` = 'a' THEN 'acquired' WHEN HL.`HL_LOCK_STATE` = 'w' THEN 'waiting' END AS LOCK_STATE,
-    CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'semi-shared' END AS LOCK_TYPE,
+    CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'x' THEN 'excl_write' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared_read' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'shared_write' END AS LOCK_TYPE,
     FROM_UNIXTIME(HL.`HL_LAST_HEARTBEAT`),
     FROM_UNIXTIME(HL.`HL_ACQUIRED_AT`),
     HL.`HL_USER`,
diff --git a/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
index fa51874..0523e25 100644
--- a/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
@@ -479,7 +479,7 @@ SELECT DISTINCT
     HL.`HL_TABLE`,
     HL.`HL_PARTITION`,
    CASE WHEN HL.`HL_LOCK_STATE` = 'a' THEN 'acquired' WHEN HL.`HL_LOCK_STATE` = 'w' THEN 'waiting' END AS LOCK_STATE,
-    CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'semi-shared' END AS LOCK_TYPE,
+    CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'x' THEN 'excl_write' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared_read' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'shared_write' END AS LOCK_TYPE,
     FROM_UNIXTIME(HL.`HL_LAST_HEARTBEAT`),
     FROM_UNIXTIME(HL.`HL_ACQUIRED_AT`),
     HL.`HL_USER`,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 98d5a79..77878ca 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -2926,7 +2926,7 @@ public class AcidUtils {
     boolean skipReadLock = !conf.getBoolVar(ConfVars.HIVE_TXN_READ_LOCKS);
    boolean skipNonAcidReadLock = !conf.getBoolVar(ConfVars.HIVE_TXN_NONACID_READ_LOCKS);
 
-    // For each source to read, get a shared lock
+    // For each source to read, get a shared_read lock
     for (ReadEntity input : inputs) {
       if (input.isDummy()
           || !input.needsLock()
@@ -2938,7 +2938,7 @@ public class AcidUtils {
         continue;
       }
       LockComponentBuilder compBuilder = new LockComponentBuilder();
-      compBuilder.setShared();
+      compBuilder.setSharedRead();
       compBuilder.setOperationType(DataOperationType.SELECT);
 
       Table t = null;
@@ -2980,7 +2980,7 @@ public class AcidUtils {
     // For each source to write to, get the appropriate lock type.  If it's
     // an OVERWRITE, we need to get an exclusive lock.  If it's an insert (no
     // overwrite) than we need a shared.  If it's update or delete then we
-    // need a SEMI-SHARED.
+    // need a SHARED_WRITE.
     for (WriteEntity output : outputs) {
       LOG.debug("output is null " + (output == null));
      if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR || !AcidUtils
@@ -3027,7 +3027,7 @@ public class AcidUtils {
           if (conf.getBoolVar(HiveConf.ConfVars.TXN_OVERWRITE_X_LOCK)) {
             compBuilder.setExclusive();
           } else {
-            compBuilder.setSemiShared();
+            compBuilder.setSharedWrite();
           }
           compBuilder.setOperationType(DataOperationType.UPDATE);
         } else {
@@ -3038,7 +3038,7 @@ public class AcidUtils {
       case INSERT:
         assert t != null;
         if (AcidUtils.isTransactionalTable(t)) {
-          compBuilder.setShared();
+          compBuilder.setSharedRead();
         } else if (MetaStoreUtils.isNonNativeTable(t.getTTable())) {
          final HiveStorageHandler storageHandler = Preconditions.checkNotNull(t.getStorageHandler(),
              "Thought all the non native tables have an instance of storage handler");
@@ -3053,13 +3053,13 @@ public class AcidUtils {
          if (conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE)) {
             compBuilder.setExclusive();
          } else {  // this is backward compatible for non-ACID resources, w/o ACID semantics
-            compBuilder.setShared();
+            compBuilder.setSharedRead();
           }
         }
         compBuilder.setOperationType(DataOperationType.INSERT);
         break;
       case DDL_SHARED:
-        compBuilder.setShared();
+        compBuilder.setSharedRead();
         if (!output.isTxnAnalyze()) {
          // Analyze needs txn components to be present, otherwise an aborted analyze write ID
          // might be rolled under the watermark by compactor while stats written by it are
@@ -3069,11 +3069,11 @@ public class AcidUtils {
         break;
 
       case UPDATE:
-        compBuilder.setSemiShared();
+        compBuilder.setSharedWrite();
         compBuilder.setOperationType(DataOperationType.UPDATE);
         break;
       case DELETE:
-        compBuilder.setSemiShared();
+        compBuilder.setSharedWrite();
         compBuilder.setOperationType(DataOperationType.DELETE);
         break;
 
diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
index 72f095d..1d21185 100644
--- a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
+++ b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
@@ -88,6 +88,7 @@ import static junit.framework.Assert.assertFalse;
 import static junit.framework.Assert.assertNull;
 import static junit.framework.Assert.assertTrue;
 import static junit.framework.Assert.fail;
+import static org.apache.hadoop.hive.metastore.utils.LockTypeUtil.getEncoding;
 
 /**
  * Tests for TxnHandler.
@@ -1394,7 +1395,7 @@ public class TestTxnHandler {
      stmt.executeUpdate("INSERT INTO \"HIVE_LOCKS\" (\"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_TXNID\", " +
          "\"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\", \"HL_LAST_HEARTBEAT\", " +
          "\"HL_USER\", \"HL_HOST\") VALUES (1, 1, 1, 'MYDB', 'MYTABLE', 'MYPARTITION', '" +
-          tHndlr.LOCK_WAITING + "', '" + tHndlr.LOCK_EXCLUSIVE + "', " + now + ", 'fred', " +
+          tHndlr.LOCK_WAITING + "', '" + getEncoding(LockType.EXCLUSIVE) + "', " + now + ", 'fred', " +
           "'scooby.com')");
       conn.commit();
       tHndlr.closeDbConn(conn);
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 80fb1af..73d3b91 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -126,7 +126,7 @@ public class TestDbTxnManager2 {
   @Test
   public void testMetadataOperationLocks() throws Exception {
    boolean isStrict = conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE);
-    //to make insert into non-acid take shared lock
+    //to make insert into non-acid take shared_read lock
     conf.setBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE, false);
     dropTable(new String[] {"T"});
     driver.run("create table if not exists T (a int, b int)");
@@ -134,7 +134,7 @@ public class TestDbTxnManager2 {
     txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");
     List<ShowLocksResponseElement> locks = getLocks();
     Assert.assertEquals("Unexpected lock count", 1, locks.size());
-    //since LM is using non strict mode we get shared lock
+    //since LM is using non strict mode we get shared_read lock
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T", null, locks);
 
     //simulate concurrent session
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockType.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockType.java
index 8ae4351..b38ed1c 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockType.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockType.java
@@ -14,7 +14,8 @@ import org.apache.thrift.TEnum;
 public enum LockType implements org.apache.thrift.TEnum {
   SHARED_READ(1),
   SHARED_WRITE(2),
-  EXCLUSIVE(3);
+  EXCLUSIVE(3),
+  EXCL_WRITE(4);
 
   private final int value;
 
@@ -41,6 +42,8 @@ public enum LockType implements org.apache.thrift.TEnum {
         return SHARED_WRITE;
       case 3:
         return EXCLUSIVE;
+      case 4:
+        return EXCL_WRITE;
       default:
         return null;
     }
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
index db4cfb9..9fb7ff0 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
@@ -89,10 +89,12 @@ final class LockType {
   const SHARED_READ = 1;
   const SHARED_WRITE = 2;
   const EXCLUSIVE = 3;
+  const EXCL_WRITE = 4;
   static public $__names = array(
     1 => 'SHARED_READ',
     2 => 'SHARED_WRITE',
     3 => 'EXCLUSIVE',
+    4 => 'EXCL_WRITE',
   );
 }
 
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index cf31379..4f317b3 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -127,17 +127,20 @@ class LockType:
   SHARED_READ = 1
   SHARED_WRITE = 2
   EXCLUSIVE = 3
+  EXCL_WRITE = 4
 
   _VALUES_TO_NAMES = {
     1: "SHARED_READ",
     2: "SHARED_WRITE",
     3: "EXCLUSIVE",
+    4: "EXCL_WRITE",
   }
 
   _NAMES_TO_VALUES = {
     "SHARED_READ": 1,
     "SHARED_WRITE": 2,
     "EXCLUSIVE": 3,
+    "EXCL_WRITE": 4,
   }
 
 class CompactionType:
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 849970e..e64ae0e 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -61,8 +61,9 @@ module LockType
   SHARED_READ = 1
   SHARED_WRITE = 2
   EXCLUSIVE = 3
-  VALUE_MAP = {1 => "SHARED_READ", 2 => "SHARED_WRITE", 3 => "EXCLUSIVE"}
-  VALID_VALUES = Set.new([SHARED_READ, SHARED_WRITE, EXCLUSIVE]).freeze
+  EXCL_WRITE = 4
+  VALUE_MAP = {1 => "SHARED_READ", 2 => "SHARED_WRITE", 3 => "EXCLUSIVE", 4 => "EXCL_WRITE"}
+  VALID_VALUES = Set.new([SHARED_READ, SHARED_WRITE, EXCLUSIVE, EXCL_WRITE]).freeze
 end
 
 module CompactionType
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
index c739d4d..3488062 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
@@ -45,19 +45,28 @@ public class LockComponentBuilder {
   }
 
   /**
-   * Set the lock to be semi-shared.
+   * Set the lock to be excl_write.
    * @return reference to this builder
    */
-  public LockComponentBuilder setSemiShared() {
+  public LockComponentBuilder setExclWrite() {
+    component.setType(LockType.EXCL_WRITE);
+    return this;
+  }
+
+  /**
+   * Set the lock to be shared_write.
+   * @return reference to this builder
+   */
+  public LockComponentBuilder setSharedWrite() {
     component.setType(LockType.SHARED_WRITE);
     return this;
   }
 
   /**
-   * Set the lock to be shared.
+   * Set the lock to be shared_read.
    * @return reference to this builder
    */
-  public LockComponentBuilder setShared() {
+  public LockComponentBuilder setSharedRead() {
     component.setType(LockType.SHARED_READ);
     return this;
   }
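
For reference, a minimal sketch (not part of this patch) of how a caller builds lock components with the renamed methods; the database/table names are illustrative, and the metastore API imports (LockComponent, LockComponentBuilder, DataOperationType) are assumed:

    // Illustrative only: setSharedRead() replaces setShared(),
    // setSharedWrite() replaces setSemiShared(), and setExclWrite() is new.
    LockComponent readLock = new LockComponentBuilder()
        .setDbName("mydb")
        .setTableName("mytable")
        .setSharedRead()
        .setOperationType(DataOperationType.SELECT)
        .build();

    LockComponent writeLock = new LockComponentBuilder()
        .setDbName("mydb")
        .setTableName("mytable")
        .setSharedWrite()
        .setOperationType(DataOperationType.UPDATE)
        .build();
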
diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 098ddec..1e3d6e9 100644
--- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -175,6 +175,7 @@ enum LockType {
     SHARED_READ = 1,
     SHARED_WRITE = 2,
     EXCLUSIVE = 3,
+    EXCL_WRITE = 4,
 }
 
 enum CompactionType {
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index be4d63c..331fd4c 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -33,6 +33,7 @@ import java.util.Arrays;
 import java.util.BitSet;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -83,12 +84,14 @@ import org.apache.hadoop.hive.metastore.metrics.Metrics;
 import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
 import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.apache.hadoop.hive.metastore.utils.LockTypeUtil;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.utils.StringableMap;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import static org.apache.hadoop.hive.metastore.DatabaseProduct.MYSQL;
 import static org.apache.hadoop.hive.metastore.txn.TxnDbUtil.executeQueriesInBatch;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
@@ -175,11 +178,6 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   static final protected char LOCK_ACQUIRED = 'a';
   static final protected char LOCK_WAITING = 'w';
 
-  // Lock types
-  static final protected char LOCK_EXCLUSIVE = 'e';
-  static final protected char LOCK_SHARED = 'r';
-  static final protected char LOCK_SEMI_SHARED = 'w';
-
   private static final int ALLOWED_REPEATED_DEADLOCKS = 10;
   private static final Logger LOG = LoggerFactory.getLogger(TxnHandler.class.getName());
   private static final Long TEMP_HIVE_LOCK_ID = -1L;
@@ -2564,6 +2562,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
                   + lc + " agentInfo=" + rqst.getAgentInfo());
         }
         intLockId++;
+        String lockType = LockTypeUtil.getEncodingAsStr(lc.getType());
 
         pstmt.setLong(1, TEMP_HIVE_LOCK_ID);
         pstmt.setLong(2, intLockId);
@@ -2572,7 +2571,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
         pstmt.setString(5, normalizeCase(lc.getTablename()));
         pstmt.setString(6, normalizeCase(lc.getPartitionname()));
         pstmt.setString(7, Character.toString(LOCK_WAITING));
-        pstmt.setString(8, Character.toString(getLockChar(lc.getType())));
+        pstmt.setString(8, lockType);
         pstmt.setString(9, rqst.getUser());
         pstmt.setString(10, rqst.getHostname());
         pstmt.setString(11, rqst.getAgentInfo());
@@ -2590,19 +2589,6 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
     }
   }
 
-  private char getLockChar(LockType lockType) {
-    switch (lockType) {
-      case EXCLUSIVE:
-        return LOCK_EXCLUSIVE;
-      case SHARED_READ:
-        return LOCK_SHARED;
-      case SHARED_WRITE:
-        return LOCK_SEMI_SHARED;
-      default:
-        return 'z';
-    }
-  }
-
   private static String normalizeCase(String s) {
     return s == null ? null : s.toLowerCase();
   }
@@ -2845,12 +2831,12 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
             case LOCK_WAITING: e.setState(LockState.WAITING); break;
            default: throw new MetaException("Unknown lock state " + rs.getString(6).charAt(0));
           }
-          switch (rs.getString(7).charAt(0)) {
-            case LOCK_SEMI_SHARED: e.setType(LockType.SHARED_WRITE); break;
-            case LOCK_EXCLUSIVE: e.setType(LockType.EXCLUSIVE); break;
-            case LOCK_SHARED: e.setType(LockType.SHARED_READ); break;
-            default: throw new MetaException("Unknown lock type " + rs.getString(6).charAt(0));
-          }
+
+          char lockChar = rs.getString(7).charAt(0);
+          LockType lockType = LockTypeUtil.getLockTypeFromEncoding(lockChar)
+                  .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar));
+          e.setType(lockType);
+
           e.setLastheartbeat(rs.getLong(8));
           long acquiredAt = rs.getLong(9);
           if (!rs.wasNull()) e.setAcquiredat(acquiredAt);
@@ -4036,13 +4022,9 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
         default:
          throw new MetaException("Unknown lock state " + rs.getString("HL_LOCK_STATE").charAt(0));
       }
-      switch (rs.getString("HL_LOCK_TYPE").charAt(0)) {
-        case LOCK_EXCLUSIVE: type = LockType.EXCLUSIVE; break;
-        case LOCK_SHARED: type = LockType.SHARED_READ; break;
-        case LOCK_SEMI_SHARED: type = LockType.SHARED_WRITE; break;
-        default:
-          throw new MetaException("Unknown lock type " + rs.getString("HL_LOCK_TYPE").charAt(0));
-      }
+      char lockChar = rs.getString("HL_LOCK_TYPE").charAt(0);
+      type = LockTypeUtil.getLockTypeFromEncoding(lockChar)
+              .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar));
       txnId = rs.getLong("HL_TXNID");//returns 0 if value is NULL
     }
     LockInfo(ShowLocksResponseElement e) {
@@ -4130,37 +4112,32 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
    * be acquired once it sees S(DB).  So need to check stricter locks first.
    */
   private final static class LockTypeComparator implements Comparator<LockType> {
+    // the higher the integer value, the more restrictive the lock type is
+    private static final Map<LockType, Integer> orderOfRestrictiveness = new EnumMap<>(LockType.class);
+    static {
+        orderOfRestrictiveness.put(LockType.SHARED_READ, 1);
+        orderOfRestrictiveness.put(LockType.SHARED_WRITE, 2);
+        orderOfRestrictiveness.put(LockType.EXCL_WRITE, 3);
+        orderOfRestrictiveness.put(LockType.EXCLUSIVE, 4);
+    }
+    @Override
     public boolean equals(Object other) {
       return this == other;
     }
+    @Override
     public int compare(LockType t1, LockType t2) {
-      switch (t1) {
-        case EXCLUSIVE:
-          if(t2 == LockType.EXCLUSIVE) {
-            return 0;
-          }
-          return 1;
-        case SHARED_WRITE:
-          switch (t2) {
-            case EXCLUSIVE:
-              return -1;
-            case SHARED_WRITE:
-              return 0;
-            case SHARED_READ:
-              return 1;
-            default:
-              throw new RuntimeException("Unexpected LockType: " + t2);
-          }
-        case SHARED_READ:
-          if(t2 == LockType.SHARED_READ) {
-            return 0;
-          }
-          return -1;
-        default:
-          throw new RuntimeException("Unexpected LockType: " + t1);
+      Integer t1Restrictiveness = orderOfRestrictiveness.get(t1);
+      Integer t2Restrictiveness = orderOfRestrictiveness.get(t2);
+      if (t1Restrictiveness == null) {
+        throw new RuntimeException("Unexpected LockType: " + t1);
       }
+      if (t2Restrictiveness == null) {
+        throw new RuntimeException("Unexpected LockType: " + t2);
+      }
+      return Integer.compare(t1Restrictiveness, t2Restrictiveness);
     }
   }
+
   private enum LockAction {ACQUIRE, WAIT, KEEP_LOOKING}
 
   // A jump table to figure out whether to wait, acquire,
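
To make the comparator's contract concrete, here is a small standalone sketch (not from the patch) that reproduces the same restrictiveness ordering; LockTypeComparator itself is private to TxnHandler, so this only restates its behaviour, assuming java.util imports and the Thrift-generated LockType enum:

    // Stricter lock types compare as greater, matching the EnumMap values above.
    Comparator<LockType> byRestrictiveness = Comparator.comparingInt((LockType t) -> {
        switch (t) {
          case SHARED_READ:  return 1;
          case SHARED_WRITE: return 2;
          case EXCL_WRITE:   return 3;
          case EXCLUSIVE:    return 4;
          default: throw new IllegalArgumentException("Unexpected LockType: " + t);
        }
    });
    List<LockType> types = new ArrayList<>(Arrays.asList(
        LockType.EXCLUSIVE, LockType.SHARED_READ, LockType.SHARED_WRITE, LockType.EXCL_WRITE));
    types.sort(byRestrictiveness);
    // -> [SHARED_READ, SHARED_WRITE, EXCL_WRITE, EXCLUSIVE]
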
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index da38a6b..b3a1f82 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@ -236,15 +236,15 @@ public class TxnUtils {
   * Note that, in this method, "a composite 'IN' clause" is defined as "a list of multiple 'IN'
    * clauses in a query".
    *
-   * @param queries   OUT: Array of query strings
-   * @param prefix    IN:  Part of the query that comes before IN list
-   * @param suffix    IN:  Part of the query that comes after IN list
-   * @param inList    IN:  the list with IN list values
-   * @param inColumn  IN:  single column name of IN list operator
-   * @param addParens IN:  add a pair of parenthesis outside the IN lists
-   *                       e.g. "(id in (1,2,3) OR id in (4,5,6))"
-   * @param notIn     IN:  is this for building a 'NOT IN' composite clause?
-   * @return          OUT: a list of the count of IN list values that are in each of the corresponding queries
+   * @param queries   IN-OUT: Array of query strings
+   * @param prefix    IN:     Part of the query that comes before IN list
+   * @param suffix    IN:     Part of the query that comes after IN list
+   * @param inList    IN:     the list with IN list values
+   * @param inColumn  IN:     single column name of IN list operator
+   * @param addParens IN:     add a pair of parenthesis outside the IN lists
+   *                          e.g. "(id in (1,2,3) OR id in (4,5,6))"
+   * @param notIn     IN:     is this for building a 'NOT IN' composite clause?
+   * @return          OUT:    a list of the count of IN list values that are in each of the corresponding queries
    */
   public static List<Integer> buildQueryWithINClauseStrings(Configuration conf, List<String> queries, StringBuilder prefix,
      StringBuilder suffix, List<String> inList, String inColumn, boolean addParens, boolean notIn) {
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/LockTypeUtil.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/LockTypeUtil.java
new file mode 100644
index 0000000..f928bf7
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/LockTypeUtil.java
@@ -0,0 +1,36 @@
+package org.apache.hadoop.hive.metastore.utils;
+
+import com.google.common.collect.BiMap;
+import com.google.common.collect.EnumHashBiMap;
+import org.apache.hadoop.hive.metastore.api.LockType;
+
+import java.util.Optional;
+
+/**
+ * Provides utility methods for {@link org.apache.hadoop.hive.metastore.api.LockType}.
+ * One use case is to encapsulate each lock type's persistence encoding, since
+ * Thrift-generated enums cannot be extended with character values.
+ */
+public class LockTypeUtil {
+
+    private static final char UNKNOWN_LOCK_TYPE_ENCODING = 'z';
+    private static final BiMap<LockType, Character> persistenceEncodings = EnumHashBiMap.create(LockType.class);
+    static {
+        persistenceEncodings.put(LockType.SHARED_READ, 'r');
+        persistenceEncodings.put(LockType.SHARED_WRITE, 'w');
+        persistenceEncodings.put(LockType.EXCL_WRITE, 'x');
+        persistenceEncodings.put(LockType.EXCLUSIVE, 'e');
+    }
+
+    public static char getEncoding(LockType lockType) {
+        return persistenceEncodings.getOrDefault(lockType, UNKNOWN_LOCK_TYPE_ENCODING);
+    }
+
+    public static String getEncodingAsStr(LockType lockType) {
+        return Character.toString(getEncoding(lockType));
+    }
+
+    public static Optional<LockType> getLockTypeFromEncoding(char encoding) {
+        return Optional.ofNullable(persistenceEncodings.inverse().get(encoding));
+    }
+}
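
A quick usage sketch (not part of the commit) of the round trip this utility provides between LockType values and the single-character codes persisted in HIVE_LOCKS.HL_LOCK_TYPE:

    // Illustrative only; mirrors what TxnHandler now does when writing and reading lock rows.
    String stored = LockTypeUtil.getEncodingAsStr(LockType.SHARED_WRITE);        // "w"
    Optional<LockType> decoded = LockTypeUtil.getLockTypeFromEncoding(stored.charAt(0));
    // decoded.get() == LockType.SHARED_WRITE; an unrecognized character yields Optional.empty()
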
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
index 1dfc105..5f3db52 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
@@ -222,13 +222,13 @@ public class TestHiveMetaStoreTxns {
     rqstBuilder.addLockComponent(new LockComponentBuilder()
         .setDbName("mydb")
         .setTableName("yourtable")
-        .setSemiShared()
+        .setSharedWrite()
         .setOperationType(DataOperationType.NO_TXN)
         .build());
     rqstBuilder.addLockComponent(new LockComponentBuilder()
         .setDbName("yourdb")
         .setOperationType(DataOperationType.NO_TXN)
-        .setShared()
+        .setSharedRead()
         .build());
     rqstBuilder.setUser("fred");
 
@@ -255,18 +255,18 @@ public class TestHiveMetaStoreTxns {
         .setDbName("mydb")
         .setTableName("mytable")
         .setPartitionName("mypartition")
-        .setSemiShared()
+        .setSharedWrite()
         .setOperationType(DataOperationType.UPDATE)
         .build())
       .addLockComponent(new LockComponentBuilder()
         .setDbName("mydb")
         .setTableName("yourtable")
-        .setSemiShared()
+        .setSharedWrite()
         .setOperationType(DataOperationType.UPDATE)
         .build())
       .addLockComponent(new LockComponentBuilder()
         .setDbName("yourdb")
-        .setShared()
+        .setSharedRead()
         .setOperationType(DataOperationType.SELECT)
         .build())
       .setUser("fred");
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/LockTypeUtilTest.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/LockTypeUtilTest.java
new file mode 100644
index 0000000..3d69a6e
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/LockTypeUtilTest.java
@@ -0,0 +1,30 @@
+package org.apache.hadoop.hive.metastore.utils;
+
+import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+import org.apache.hadoop.hive.metastore.api.LockType;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+@Category(MetastoreUnitTest.class)
+public class LockTypeUtilTest {
+
+    @Test
+    public void testGetEncoding() {
+        assertEquals('r', LockTypeUtil.getEncoding(LockType.SHARED_READ));
+        assertEquals('w', LockTypeUtil.getEncoding(LockType.SHARED_WRITE));
+        assertEquals('x', LockTypeUtil.getEncoding(LockType.EXCL_WRITE));
+        assertEquals('e', LockTypeUtil.getEncoding(LockType.EXCLUSIVE));
+    }
+
+    @Test
+    public void testGetLockType() {
+        assertEquals(LockType.SHARED_READ, LockTypeUtil.getLockTypeFromEncoding('r').get());
+        assertEquals(LockType.SHARED_WRITE, LockTypeUtil.getLockTypeFromEncoding('w').get());
+        assertEquals(LockType.EXCL_WRITE, LockTypeUtil.getLockTypeFromEncoding('x').get());
+        assertEquals(LockType.EXCLUSIVE, LockTypeUtil.getLockTypeFromEncoding('e').get());
+        assertFalse(LockTypeUtil.getLockTypeFromEncoding('y').isPresent());
+    }
+}
diff --git a/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java b/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java
index d440650..40239ab 100644
--- a/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java
+++ b/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java
@@ -439,7 +439,7 @@ public class TransactionBatch extends AbstractStreamingTransaction {
     LockComponentBuilder lockCompBuilder = new LockComponentBuilder()
         .setDbName(connection.getDatabase())
         .setTableName(connection.getTable().getTableName())
-        .setShared()
+        .setSharedRead()
         .setOperationType(DataOperationType.INSERT);
     if (connection.isDynamicPartitioning()) {
       lockCompBuilder.setIsDynamicPartitionWrite(true);
