HIVE-18124 clean up isAcidTable() API vs isInsertOnlyTable() (Eugene Koifman, reviewed by Alan Gates)
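
In short, the renames (all visible in the diff below):
  AcidUtils.isAcidTable()                 -> AcidUtils.isTransactionalTable()  (any 'transactional'='true' table)
  AcidUtils.isFullAcidTable()             -> AcidUtils.isAcidTable()           (transactional and not insert-only)
  AcidUtils.setTransactionalTableScan()   -> AcidUtils.setAcidTableScan()
  BaseSemanticAnalyzer.hasAcidInQuery()   -> hasTransactionalInQuery()
  ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN  -> ConfVars.HIVE_ACID_TABLE_SCAN ("hive.acid.table.scan")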


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9efed65a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9efed65a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9efed65a

Branch: refs/heads/master
Commit: 9efed65a5a139691b7862b2344f01d48ff02ea06
Parents: 12a33fd
Author: Eugene Koifman <ekoif...@hortonworks.com>
Authored: Mon Dec 18 15:22:29 2017 -0800
Committer: Eugene Koifman <ekoif...@hortonworks.com>
Committed: Mon Dec 18 15:23:20 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  2 +-
 .../mapreduce/FosterStorageHandler.java         |  6 +-
 .../hcatalog/mapreduce/HCatOutputFormat.java    |  6 +-
 .../hive/hcatalog/streaming/TestStreaming.java  |  2 +-
 .../streaming/mutate/StreamingAssert.java       |  2 +-
 .../hive/ql/txn/compactor/TestCompactor.java    |  2 +-
 .../hive/llap/io/api/impl/LlapRecordReader.java |  2 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |  2 +-
 .../org/apache/hadoop/hive/ql/QueryPlan.java    |  2 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  2 +-
 .../apache/hadoop/hive/ql/exec/FetchTask.java   |  4 +-
 .../hadoop/hive/ql/exec/SMBMapJoinOperator.java |  2 +-
 .../hadoop/hive/ql/exec/mr/MapredLocalTask.java |  2 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 62 ++++++++++++--------
 .../hadoop/hive/ql/io/HiveInputFormat.java      |  4 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |  6 +-
 .../apache/hadoop/hive/ql/io/orc/OrcSplit.java  |  2 +-
 .../io/orc/VectorizedOrcAcidRowBatchReader.java |  2 +-
 .../ql/io/orc/VectorizedOrcInputFormat.java     |  2 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    | 12 ++--
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  4 +-
 .../BucketingSortingReduceSinkOptimizer.java    |  2 +-
 .../hive/ql/optimizer/StatsOptimizer.java       |  6 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |  6 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |  8 +--
 .../hive/ql/parse/LoadSemanticAnalyzer.java     |  6 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 54 +++++++----------
 .../ql/parse/UpdateDeleteSemanticAnalyzer.java  |  4 +-
 .../apache/hadoop/hive/ql/plan/LoadDesc.java    |  4 +-
 .../hadoop/hive/ql/plan/TableScanDesc.java      |  2 +-
 .../apache/hadoop/hive/ql/stats/Partish.java    |  2 +-
 .../hive/ql/txn/compactor/CompactorMR.java      |  2 +-
 .../apache/hadoop/hive/ql/TestTxnLoadData.java  | 16 ++---
 .../hive/ql/io/orc/TestInputOutputFormat.java   | 12 ++--
 .../hive/ql/io/orc/TestOrcRawRecordMerger.java  | 19 +++---
 .../TestVectorizedOrcAcidRowBatchReader.java    |  2 +-
 .../test/queries/clientnegative/delete_sorted.q |  7 ---
 .../test/queries/clientnegative/insert_sorted.q |  4 +-
 .../clientnegative/insert_values_sorted.q       |  7 ---
 .../test/queries/clientnegative/update_sorted.q |  7 ---
 .../results/clientnegative/delete_sorted.q.out  |  9 ---
 .../results/clientnegative/insert_sorted.q.out  | 22 +++++--
 .../clientnegative/insert_values_sorted.q.out   |  9 ---
 .../results/clientnegative/update_sorted.q.out  |  9 ---
 .../results/clientpositive/llap/mm_all.q.out    |  1 -
 ql/src/test/results/clientpositive/mm_all.q.out |  1 -
 .../hive/metastore/AcidEventListener.java       |  6 +-
 .../TransactionalValidationListener.java        | 26 +++++---
 .../hadoop/hive/metastore/txn/TxnUtils.java     | 20 +++++--
 49 files changed, 189 insertions(+), 214 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 711dfbd..28b52d4 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1307,7 +1307,7 @@ public class HiveConf extends Configuration {
     HIVE_SCHEMA_EVOLUTION("hive.exec.schema.evolution", true,
         "Use schema evolution to convert self-describing file format's data to 
the schema desired by the reader."),
 
-    HIVE_TRANSACTIONAL_TABLE_SCAN("hive.transactional.table.scan", false,
+    HIVE_ACID_TABLE_SCAN("hive.acid.table.scan", false,
         "internal usage only -- do transaction (ACID) table scan.", true),
 
     HIVE_TRANSACTIONAL_NUM_EVENTS_IN_MEMORY("hive.transactional.events.mem", 
10000000,

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
----------------------------------------------------------------------
diff --git 
a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
 
b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
index 040906f..02ccd48 100644
--- 
a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
+++ 
b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
@@ -131,10 +131,12 @@ public class FosterStorageHandler extends 
DefaultStorageHandler {
         jobProperties.put(IOConstants.SCHEMA_EVOLUTION_COLUMNS, 
columnNamesSb.toString());
         jobProperties.put(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, 
typeNamesSb.toString());
 
-        boolean isAcidTable = 
AcidUtils.isTablePropertyTransactional(tableProperties);
-        AcidUtils.setTransactionalTableScan(jobProperties, isAcidTable);
+        boolean isTransactionalTable = 
AcidUtils.isTablePropertyTransactional(tableProperties);
         AcidUtils.AcidOperationalProperties acidOperationalProperties =
                 AcidUtils.getAcidOperationalProperties(tableProperties);
+        if(acidOperationalProperties.isSplitUpdate()) {
+          AcidUtils.setAcidTableScan(jobProperties, isTransactionalTable);
+        }
         AcidUtils.setAcidOperationalProperties(jobProperties, 
acidOperationalProperties);
       }
     } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
----------------------------------------------------------------------
diff --git 
a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
 
b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
index 996bb02..4f74349 100644
--- 
a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
+++ 
b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java
@@ -113,9 +113,9 @@ public class HCatOutputFormat extends HCatBaseOutputFormat {
       if (sd.getSortCols() != null && !sd.getSortCols().isEmpty()) {
         throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a 
partition with sorted column definition from Pig/Mapreduce is not supported");
       }
-
-      if (AcidUtils.isAcidTable(table)) {
-        throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into an 
insert-only ACID table from Pig/Mapreduce is not supported");
+      if (AcidUtils.isTransactionalTable(table)) {
+        throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a 
transactional table "
+          + table.getFullyQualifiedName() + " from Pig/Mapreduce is not 
supported");
       }
 
       // Set up a common id hash for this job, so that when we create any 
temporary directory

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
 
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index 49aad39..dc8eee1 100644
--- 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ 
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -564,7 +564,7 @@ public class TestStreaming {
     job.set(BUCKET_COUNT, Integer.toString(buckets));
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "id,msg");
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string");
-    AcidUtils.setTransactionalTableScan(job,true);
+    AcidUtils.setAcidTableScan(job,true);
     job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString());
     InputSplit[] splits = inf.getSplits(job, buckets);

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java
----------------------------------------------------------------------
diff --git 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java
 
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java
index d5429fb..25db0fb 100644
--- 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java
+++ 
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java
@@ -143,7 +143,7 @@ public class StreamingAssert {
     job.set(hive_metastoreConstants.BUCKET_COUNT, 
Integer.toString(table.getSd().getNumBuckets()));
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "id,msg");
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string");
-    AcidUtils.setTransactionalTableScan(job,true);
+    AcidUtils.setAcidTableScan(job,true);
     job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString());
     InputSplit[] splits = inputFormat.getSplits(job, 1);

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
----------------------------------------------------------------------
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index a1cd9eb..75eeaf6 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -1352,7 +1352,7 @@ public class TestCompactor {
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, columnNamesProperty);
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, columnTypesProperty);
     conf.set(hive_metastoreConstants.BUCKET_COUNT, 
Integer.toString(numBuckets));
-    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, 
true);
+    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true);
     AcidInputFormat.RawReader<OrcStruct> reader =
         aif.getRawReader(conf, true, bucket, txnList, base, deltas);
     RecordIdentifier identifier = reader.createKey();

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
----------------------------------------------------------------------
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
index 5f010be..a4b877b 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
@@ -139,7 +139,7 @@ class LlapRecordReader
     this.counters = new QueryFragmentCounters(job, taskCounters);
     this.counters.setDesc(QueryFragmentCounters.Desc.MACHINE, hostName);
 
-    isAcidScan = HiveConf.getBoolVar(jobConf, 
ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN);
+    isAcidScan = HiveConf.getBoolVar(jobConf, ConfVars.HIVE_ACID_TABLE_SCAN);
     TypeDescription schema = OrcInputFormat.getDesiredRowTypeDescr(
         job, isAcidScan, Integer.MAX_VALUE);
     if (isAcidScan) {

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index a1ff360..21f90a7 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -228,7 +228,7 @@ public class OrcEncodedDataReader extends 
CallableWithNdc<Void>
       readerSchema = fileMetadata.getSchema();
     }
     readerIncludes = OrcInputFormat.genIncludedColumns(readerSchema, 
includedColumnIds);
-    if (HiveConf.getBoolVar(jobConf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN)) {
+    if (HiveConf.getBoolVar(jobConf, ConfVars.HIVE_ACID_TABLE_SCAN)) {
       fileIncludes = OrcInputFormat.shiftReaderIncludedForAcid(readerIncludes);
     } else {
       fileIncludes = OrcInputFormat.genIncludedColumns(fileSchema, 
includedColumnIds);

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java 
b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
index 0a25707..be6f5d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
@@ -146,7 +146,7 @@ public class QueryPlan implements Serializable {
     this.operation = operation;
     this.autoCommitValue = sem.getAutoCommitValue();
     this.resultSchema = resultSchema;
-    this.acidResourcesInQuery = sem.hasAcidInQuery();
+    this.acidResourcesInQuery = sem.hasTransactionalInQuery();
     this.acidSinks = sem.getAcidFileSinks();
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 05041cd..7fc07b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -2163,7 +2163,7 @@ public class DDLTask extends Task<DDLWork> implements 
Serializable {
   private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException 
{
 
     Table tbl = db.getTable(desc.getTableName());
-    if (!AcidUtils.isFullAcidTable(tbl) && 
!AcidUtils.isInsertOnlyTable(tbl.getParameters())) {
+    if (!AcidUtils.isAcidTable(tbl) && 
!AcidUtils.isInsertOnlyTable(tbl.getParameters())) {
       throw new HiveException(ErrorMsg.NONACID_COMPACTION_NOT_SUPPORTED, 
tbl.getDbName(),
           tbl.getTableName());
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
index 6589bb2..fc6052b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java
@@ -24,7 +24,6 @@ import java.util.List;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.DriverContext;
@@ -42,7 +41,6 @@ import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.util.StringUtils;
 
 /**
  * FetchTask implementation.
@@ -81,7 +79,7 @@ public class FetchTask extends Task<FetchWork> implements 
Serializable {
         // push down filters
         HiveInputFormat.pushFilters(job, ts);
 
-        AcidUtils.setTransactionalTableScan(job, ts.getConf().isAcidTable());
+        AcidUtils.setAcidTableScan(job, ts.getConf().isAcidTable());
         AcidUtils.setAcidOperationalProperties(job, 
ts.getConf().getAcidOperationalProperties());
       }
       sink = work.getSink();

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
index 64aa744..6df1e32 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
@@ -209,7 +209,7 @@ public class SMBMapJoinOperator extends 
AbstractMapJoinOperator<SMBJoinDesc> imp
       // push down filters
       HiveInputFormat.pushFilters(jobClone, ts);
 
-      AcidUtils.setTransactionalTableScan(jobClone, 
ts.getConf().isAcidTable());
+      AcidUtils.setAcidTableScan(jobClone, ts.getConf().isAcidTable());
       AcidUtils.setAcidOperationalProperties(jobClone, 
ts.getConf().getAcidOperationalProperties());
 
       ts.passExecContext(getExecContext());

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
index b6a988d..60d2b7b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
@@ -485,7 +485,7 @@ public class MapredLocalTask extends Task<MapredLocalWork> 
implements Serializab
       // push down filters
       HiveInputFormat.pushFilters(jobClone, ts);
 
-      AcidUtils.setTransactionalTableScan(jobClone, 
ts.getConf().isAcidTable());
+      AcidUtils.setAcidTableScan(jobClone, ts.getConf().isAcidTable());
       AcidUtils.setAcidOperationalProperties(jobClone, 
ts.getConf().getAcidOperationalProperties());
 
       // create a fetch operator

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index a85713b..31316f0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -353,7 +353,7 @@ public class AcidUtils {
     }
     return result;
   }
-
+  //This is used for (full) Acid tables.  InsertOnly uses NOT_ACID
   public enum Operation implements Serializable {
     NOT_ACID, INSERT, UPDATE, DELETE;
   }
@@ -419,6 +419,22 @@ public class AcidUtils {
     }
   }
 
+  /**
+   * Current syntax for creating full acid transactional tables is any one of the following 3 ways:
+   * create table T (a int, b int) stored as orc tblproperties('transactional'='true').
+   * create table T (a int, b int) stored as orc tblproperties('transactional'='true',
+   * 'transactional_properties'='default').
+   * create table T (a int, b int) stored as orc tblproperties('transactional'='true',
+   * 'transactional_properties'='split_update').
+   * These are all identical and create a table capable of insert/update/delete/merge operations
+   * with full ACID semantics at Snapshot Isolation.  These tables require the ORC input/output format.
+   *
+   * To create a 1/4 acid (a.k.a. micro-managed) table:
+   * create table T (a int, b int) stored as orc tblproperties('transactional'='true',
+   * 'transactional_properties'='insert_only').
+   * These tables only support the insert operation (also with full ACID semantics at SI).
+   *
+   */
   public static class AcidOperationalProperties {
     private int description = 0x00;
     public static final int SPLIT_UPDATE_BIT = 0x01;
@@ -1204,16 +1220,18 @@ public class AcidUtils {
     }
     return resultStr != null && resultStr.equalsIgnoreCase("true");
   }
-
-  public static void setTransactionalTableScan(Map<String, String> parameters, 
boolean isAcidTable) {
-    parameters.put(ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname, 
Boolean.toString(isAcidTable));
+  /**
+   * Means it's a full acid table
+   */
+  public static void setAcidTableScan(Map<String, String> parameters, boolean 
isAcidTable) {
+    parameters.put(ConfVars.HIVE_ACID_TABLE_SCAN.varname, 
Boolean.toString(isAcidTable));
   }
 
   /**
    * Means it's a full acid table
    */
-  public static void setTransactionalTableScan(Configuration conf, boolean 
isFullAcidTable) {
-    HiveConf.setBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, 
isFullAcidTable);
+  public static void setAcidTableScan(Configuration conf, boolean 
isFullAcidTable) {
+    HiveConf.setBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN, isFullAcidTable);
   }
   /**
    * @param p - not null
@@ -1221,15 +1239,12 @@ public class AcidUtils {
   public static boolean isDeleteDelta(Path p) {
     return p.getName().startsWith(DELETE_DELTA_PREFIX);
   }
-  /** Checks if a table is a valid ACID table.
-   * Note, users are responsible for using the correct TxnManager. We do not 
look at
-   * SessionState.get().getTxnMgr().supportsAcid() here
-   * @param table table
-   * @return true if table is a legit ACID table, false otherwise
-   * ToDo: this shoudl be renamed isTransactionalTable() since that is what 
it's checking and covers
-   * both Acid and MM tables. HIVE-18124
+
+  /**
+   * Should produce the same result as
+   * {@link 
org.apache.hadoop.hive.metastore.txn.TxnUtils#isTransactionalTable(org.apache.hadoop.hive.metastore.api.Table)}
    */
-  public static boolean isAcidTable(Table table) {
+  public static boolean isTransactionalTable(Table table) {
     if (table == null) {
       return false;
     }
@@ -1240,11 +1255,7 @@ public class AcidUtils {
 
     return tableIsTransactional != null && 
tableIsTransactional.equalsIgnoreCase("true");
   }
-  /**
-   * ToDo: this shoudl be renamed isTransactionalTable() since that is what 
it's checking and convers
-   * both Acid and MM tables. HIVE-18124
-   */
-  public static boolean isAcidTable(CreateTableDesc table) {
+  public static boolean isTransactionalTable(CreateTableDesc table) {
     if (table == null || table.getTblProps() == null) {
       return false;
     }
@@ -1256,13 +1267,14 @@ public class AcidUtils {
   }
 
   /**
-   * after isTransactionalTable() then make this isAcid() HIVE-18124
+   * Should produce the same result as
+   * {@link 
org.apache.hadoop.hive.metastore.txn.TxnUtils#isAcidTable(org.apache.hadoop.hive.metastore.api.Table)}
    */
-  public static boolean isFullAcidTable(Table table) {
-    return isAcidTable(table) && !AcidUtils.isInsertOnlyTable(table);
+  public static boolean isAcidTable(Table table) {
+    return isTransactionalTable(table) && !AcidUtils.isInsertOnlyTable(table);
   }
   
-  public static boolean isFullAcidTable(CreateTableDesc td) {
+  public static boolean isAcidTable(CreateTableDesc td) {
     if (td == null || td.getTblProps() == null) {
       return false;
     }
@@ -1392,7 +1404,7 @@ public class AcidUtils {
 
 
   /**
-   * Checks if a table is an ACID table that only supports INSERT, but not 
UPDATE/DELETE
+   * Checks if a table is a transactional table that only supports INSERT, but 
not UPDATE/DELETE
    * @param params table properties
    * @return true if table is an INSERT_ONLY table, false otherwise
    */
@@ -1400,7 +1412,7 @@ public class AcidUtils {
     return isInsertOnlyTable(params, false);
   }
   public static boolean isInsertOnlyTable(Table table) {
-    return isAcidTable(table) && 
getAcidOperationalProperties(table).isInsertOnly();
+    return isTransactionalTable(table) && 
getAcidOperationalProperties(table).isInsertOnly();
   }
 
   // TODO [MM gap]: CTAS may currently be broken. It used to work. See the old 
code, and why isCtas isn't used?
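
Net effect of the AcidUtils changes above: isTransactionalTable() is now the broad check
(any table with 'transactional'='true'), isAcidTable() is the narrow full-acid check, and
isInsertOnlyTable() picks out micro-managed tables. A minimal sketch of how the three
predicates partition tables (the AcidKind helper is hypothetical, not part of this patch;
Table is assumed to be org.apache.hadoop.hive.ql.metadata.Table):

    import org.apache.hadoop.hive.ql.io.AcidUtils;
    import org.apache.hadoop.hive.ql.metadata.Table;

    final class AcidKind {
      static String classify(Table table) {
        if (!AcidUtils.isTransactionalTable(table)) {
          return "plain";            // no 'transactional'='true' property
        }
        if (AcidUtils.isInsertOnlyTable(table)) {
          return "insert-only (MM)"; // 'transactional_properties'='insert_only'
        }
        return "full acid";          // AcidUtils.isAcidTable(table) == true
      }
    }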

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index b35df69..0718995 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -468,7 +468,7 @@ public class HiveInputFormat<K extends WritableComparable, 
V extends Writable>
     try {
       Utilities.copyTablePropertiesToConf(table, conf);
       if(tableScan != null) {
-        AcidUtils.setTransactionalTableScan(conf, 
tableScan.getConf().isAcidTable());
+        AcidUtils.setAcidTableScan(conf, tableScan.getConf().isAcidTable());
       }
     } catch (HiveException e) {
       throw new IOException(e);
@@ -851,7 +851,7 @@ public class HiveInputFormat<K extends WritableComparable, 
V extends Writable>
         // push down filters
         pushFilters(jobConf, ts);
 
-        AcidUtils.setTransactionalTableScan(job, ts.getConf().isAcidTable());
+        AcidUtils.setAcidTableScan(job, ts.getConf().isAcidTable());
         AcidUtils.setAcidOperationalProperties(job, 
ts.getConf().getAcidOperationalProperties());
       }
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index becdc71..09737fb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -214,7 +214,7 @@ public class OrcInputFormat implements 
InputFormat<NullWritable, OrcStruct>,
     /*
      * Fallback for the case when OrcSplit flags do not contain hasBase and 
deltas
      */
-    return HiveConf.getBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN);
+    return HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN);
   }
 
   private static class OrcRecordReader
@@ -309,7 +309,7 @@ public class OrcInputFormat implements 
InputFormat<NullWritable, OrcStruct>,
                                                   long offset, long length
                                                   ) throws IOException {
 
-    boolean isTransactionalTableScan = HiveConf.getBoolVar(conf, 
ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN);
+    boolean isTransactionalTableScan = HiveConf.getBoolVar(conf, 
ConfVars.HIVE_ACID_TABLE_SCAN);
     if (isTransactionalTableScan) {
       raiseAcidTablesMustBeReadWithAcidReaderException(conf);
     }
@@ -1692,7 +1692,7 @@ public class OrcInputFormat implements 
InputFormat<NullWritable, OrcStruct>,
     }
 
     boolean isTransactionalTableScan =
-        HiveConf.getBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN);
+        HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_TABLE_SCAN);
     boolean isSchemaEvolution = HiveConf.getBoolVar(conf, 
ConfVars.HIVE_SCHEMA_EVOLUTION);
     TypeDescription readerSchema =
         OrcInputFormat.getDesiredRowTypeDescr(conf, isTransactionalTableScan, 
Integer.MAX_VALUE);

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
index edffa5b..fdb3808 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
@@ -234,7 +234,7 @@ public class OrcSplit extends FileSplit implements 
ColumnarSplit, LlapAwareSplit
   public boolean canUseLlapIo(Configuration conf) {
     final boolean hasDelta = deltas != null && !deltas.isEmpty();
     final boolean isAcidRead = HiveConf.getBoolVar(conf,
-        HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN);
+        HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN);
     final boolean isVectorized = HiveConf.getBoolVar(conf,
         HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);
     final AcidUtils.AcidOperationalProperties acidOperationalProperties

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
index 990e0cb..c33fd13 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
@@ -158,7 +158,7 @@ public class VectorizedOrcAcidRowBatchReader
   private VectorizedOrcAcidRowBatchReader(JobConf conf, OrcSplit orcSplit, 
Reporter reporter,
       VectorizedRowBatchCtx rowBatchCtx) throws IOException {
     this.rbCtx = rowBatchCtx;
-    final boolean isAcidRead = HiveConf.getBoolVar(conf, 
ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN);
+    final boolean isAcidRead = HiveConf.getBoolVar(conf, 
ConfVars.HIVE_ACID_TABLE_SCAN);
     final AcidUtils.AcidOperationalProperties acidOperationalProperties
             = AcidUtils.getAcidOperationalProperties(conf);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
index 736034d..9e481ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
@@ -64,7 +64,7 @@ public class VectorizedOrcInputFormat extends 
FileInputFormat<NullWritable, Vect
     VectorizedOrcRecordReader(Reader file, Configuration conf,
         FileSplit fileSplit) throws IOException {
 
-      boolean isAcidRead = HiveConf.getBoolVar(conf, 
ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN);
+      boolean isAcidRead = HiveConf.getBoolVar(conf, 
ConfVars.HIVE_ACID_TABLE_SCAN);
       if (isAcidRead) {
         OrcInputFormat.raiseAcidTablesMustBeReadWithAcidReaderException(conf);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 48ac22d..3968b0e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -313,7 +313,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
     issue ROLLBACK but these tables won't rollback.
     Can do this by checking ReadEntity/WriteEntity to determine whether it's 
reading/writing
     any non acid and raise an appropriate error
-    * Driver.acidSinks and Driver.acidInQuery can be used if any acid is in 
the query*/
+    * Driver.acidSinks and Driver.transactionalInQuery can be used if any acid 
is in the query*/
   }
 
   /**
@@ -325,7 +325,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
     //in a txn assuming we can determine the target is a suitable table type.
     if(queryPlan.getOperation() == HiveOperation.LOAD && 
queryPlan.getOutputs() != null && queryPlan.getOutputs().size() == 1) {
       WriteEntity writeEntity = queryPlan.getOutputs().iterator().next();
-      if(AcidUtils.isFullAcidTable(writeEntity.getTable()) || 
AcidUtils.isInsertOnlyTable(writeEntity.getTable())) {
+      if(AcidUtils.isAcidTable(writeEntity.getTable()) || 
AcidUtils.isInsertOnlyTable(writeEntity.getTable())) {
         switch (writeEntity.getWriteType()) {
           case INSERT:
             //allow operation in a txn
@@ -426,7 +426,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
           continue;
       }
       if(t != null) {
-        compBuilder.setIsAcid(AcidUtils.isFullAcidTable(t));
+        compBuilder.setIsAcid(AcidUtils.isAcidTable(t));
       }
       LockComponent comp = compBuilder.build();
       LOG.debug("Adding lock component to lock request " + comp.toString());
@@ -480,7 +480,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
           break;
         case INSERT_OVERWRITE:
           t = getTable(output);
-          if (AcidUtils.isAcidTable(t)) {
+          if (AcidUtils.isTransactionalTable(t)) {
             compBuilder.setSemiShared();
             compBuilder.setOperationType(DataOperationType.UPDATE);
           } else {
@@ -490,7 +490,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
           break;
         case INSERT:
           assert t != null;
-          if(AcidUtils.isFullAcidTable(t)) {
+          if(AcidUtils.isAcidTable(t)) {
             compBuilder.setShared();
           }
           else {
@@ -524,7 +524,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
               output.getWriteType().toString());
       }
       if(t != null) {
-        compBuilder.setIsAcid(AcidUtils.isFullAcidTable(t));
+        compBuilder.setIsAcid(AcidUtils.isAcidTable(t));
       }
 
       compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite());

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 50bdce8..7e059da2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1720,7 +1720,7 @@ public class Hive {
     Path tblDataLocationPath =  tbl.getDataLocation();
     boolean isMmTableWrite = AcidUtils.isInsertOnlyTable(tbl.getParameters());
     assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName();
-    boolean isFullAcidTable = AcidUtils.isFullAcidTable(tbl);
+    boolean isFullAcidTable = AcidUtils.isAcidTable(tbl);
     try {
       // Get the partition object if it already exists
       Partition oldPart = getPartition(tbl, partSpec, false);
@@ -2317,7 +2317,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     Table tbl = getTable(tableName);
     assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName();
     boolean isMmTable = AcidUtils.isInsertOnlyTable(tbl);
-    boolean isFullAcidTable = AcidUtils.isFullAcidTable(tbl);
+    boolean isFullAcidTable = AcidUtils.isAcidTable(tbl);
     HiveConf sessionConf = SessionState.getSessionConf();
     if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
       newFiles = Collections.synchronizedList(new ArrayList<Path>());

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
index 31d2b23..7f5e543 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
@@ -410,7 +410,7 @@ public class BucketingSortingReduceSinkOptimizer extends 
Transform {
       if (stack.get(0) instanceof TableScanOperator) {
         TableScanOperator tso = ((TableScanOperator)stack.get(0));
         Table tab = tso.getConf().getTableMetadata();
-        if (AcidUtils.isFullAcidTable(tab)) {
+        if (AcidUtils.isAcidTable(tab)) {
           /*ACID tables have complex directory layout and require merging of 
delta files
           * on read thus we should not try to read bucket files directly*/
           return null;

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index 85f198b..0f3a8d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -19,13 +19,10 @@ package org.apache.hadoop.hive.ql.optimizer;
 
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.Stack;
 
@@ -277,7 +274,8 @@ public class StatsOptimizer extends Transform {
           Logger.info("Table " + tbl.getTableName() + " is external. Skip 
StatsOptimizer.");
           return null;
         }
-        if (AcidUtils.isAcidTable(tbl)) {
+        if (AcidUtils.isTransactionalTable(tbl)) {
+          //todo: should this be OK for MM table?
           Logger.info("Table " + tbl.getTableName() + " is ACID table. Skip 
StatsOptimizer.");
           return null;
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index d36d24d..2c3af5d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -125,7 +125,7 @@ public abstract class BaseSemanticAnalyzer {
 
   // whether any ACID table or Insert-only (mm) table is involved in a query
   // They both require DbTxnManager and both need to recordValidTxns when 
acquiring locks in Driver
-  protected boolean acidInQuery;
+  protected boolean transactionalInQuery;
 
   protected HiveTxnManager txnManager;
 
@@ -1487,8 +1487,8 @@ public abstract class BaseSemanticAnalyzer {
     return acidFileSinks;
   }
 
-  public boolean hasAcidInQuery() {
-    return acidInQuery;
+  public boolean hasTransactionalInQuery() {
+    return transactionalInQuery;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 971a061..8b8d913 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -1906,9 +1906,9 @@ public class DDLSemanticAnalyzer extends 
BaseSemanticAnalyzer {
     if(desc != null && desc.getProps() != null && 
Boolean.parseBoolean(desc.getProps().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)))
 {
       convertingToAcid = true;
     }
-    if(!AcidUtils.isAcidTable(tab) && convertingToAcid) {
-      //non to acid conversion (property itself) must be mutexed to prevent 
concurrent writes.
-      // See HIVE-16688 for use case.
+    if(!AcidUtils.isTransactionalTable(tab) && convertingToAcid) {
+      //non-acid to transactional conversion (property itself) must be mutexed 
to prevent concurrent writes.
+      // See HIVE-16688 for use cases.
       return WriteType.DDL_EXCLUSIVE;
     }
     return WriteEntity.determineAlterTableWriteType(op);
@@ -2128,7 +2128,7 @@ public class DDLSemanticAnalyzer extends 
BaseSemanticAnalyzer {
       }
 
       // transactional tables are compacted and no longer needs to be 
bucketed, so not safe for merge/concatenation
-      boolean isAcid = AcidUtils.isAcidTable(tblObj);
+      boolean isAcid = AcidUtils.isTransactionalTable(tblObj);
       if (isAcid) {
         throw new 
SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_TABLE_TRANSACTIONAL.getMsg());
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index e600f7a..4535c3e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -159,12 +159,12 @@ public class LoadSemanticAnalyzer extends 
BaseSemanticAnalyzer {
           throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ast,
               "source contains directory: " + oneSrc.getPath().toString()));
         }
-        if(AcidUtils.isFullAcidTable(table)) {
+        if(AcidUtils.isAcidTable(table)) {
           if(!AcidUtils.originalBucketFilter.accept(oneSrc.getPath())) {
             //acid files (e.g. bucket_0000) have ROW_ID embedded in them and 
so can't be simply
             //copied to a table so only allow non-acid files for now
             throw new 
SemanticException(ErrorMsg.ACID_LOAD_DATA_INVALID_FILE_NAME,
-              oneSrc.getPath().getName(), table.getDbName() + "." + 
table.getTableName());
+              oneSrc.getPath().getName(), table.getFullyQualifiedName());
           }
         }
       }
@@ -283,7 +283,7 @@ public class LoadSemanticAnalyzer extends 
BaseSemanticAnalyzer {
 
     Long txnId = null;
     int stmtId = -1;
-    if (AcidUtils.isAcidTable(ts.tableHandle)) {
+    if (AcidUtils.isTransactionalTable(ts.tableHandle)) {
       txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
       stmtId = SessionState.get().getTxnMgr().getWriteIdAndIncrement();
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index dcda8b3..c41e371 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6716,7 +6716,7 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
         nullOrder.append(sortOrder == 
BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? 'a' : 'z');
       }
       input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), 
nullOrder.toString(),
-              maxReducers, (AcidUtils.isFullAcidTable(dest_tab) ?
+              maxReducers, (AcidUtils.isAcidTable(dest_tab) ?
               getAcidType(table_desc.getOutputFileFormatClass(), dest) : 
AcidUtils.Operation.NOT_ACID));
       
reduceSinkOperatorsAddedByEnforceBucketingSorting.add((ReduceSinkOperator)input.getParentOperators().get(0));
       ctx.setMultiFileSpray(multiFileSpray);
@@ -6781,8 +6781,8 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
     Integer dest_type = qbm.getDestTypeForAlias(dest);
 
     Table dest_tab = null; // destination table if any
-    boolean destTableIsAcid = false;     // true for full ACID table and MM 
table
-    boolean destTableIsFullAcid = false; // should the destination table be 
written to using ACID
+    boolean destTableIsTransactional;     // true for full ACID table and MM 
table
+    boolean destTableIsFullAcid; // should the destination table be written to 
using ACID
     boolean destTableIsTemporary = false;
     boolean destTableIsMaterialization = false;
     Partition dest_part = null;// destination partition if any
@@ -6803,8 +6803,8 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
     case QBMetaData.DEST_TABLE: {
 
       dest_tab = qbm.getDestTableForAlias(dest);
-      destTableIsAcid = AcidUtils.isAcidTable(dest_tab);
-      destTableIsFullAcid = AcidUtils.isFullAcidTable(dest_tab);
+      destTableIsTransactional = AcidUtils.isTransactionalTable(dest_tab);
+      destTableIsFullAcid = AcidUtils.isAcidTable(dest_tab);
       destTableIsTemporary = dest_tab.isTemporary();
 
       // Is the user trying to insert into a external tables
@@ -6874,11 +6874,9 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
         AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
         if (destTableIsFullAcid) {
           acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
+          //todo: should this be done for MM?  is it ok to use CombineHiveInputFormat with MM?
           checkAcidConstraints(qb, table_desc, dest_tab);
         }
-        if (AcidUtils.isInsertOnlyTable(table_desc.getProperties())) {
-          acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
-        }
         if (isMmTable) {
           txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
         } else {
@@ -6891,7 +6889,7 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
         // For Acid table, Insert Overwrite shouldn't replace the table 
content. We keep the old
         // deltas and base and leave them up to the cleaner to clean up
         LoadFileType loadType = 
(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
-                dest_tab.getTableName()) && !destTableIsAcid)
+                dest_tab.getTableName()) && !destTableIsTransactional)
                 ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING;
         ltd.setLoadFileType(loadType);
         ltd.setLbCtx(lbCtx);
@@ -6915,8 +6913,8 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
 
       dest_part = qbm.getDestPartitionForAlias(dest);
       dest_tab = dest_part.getTable();
-      destTableIsAcid = AcidUtils.isAcidTable(dest_tab);
-      destTableIsFullAcid = AcidUtils.isFullAcidTable(dest_tab);
+      destTableIsTransactional = AcidUtils.isTransactionalTable(dest_tab);
+      destTableIsFullAcid = AcidUtils.isAcidTable(dest_tab);
 
       checkExternalTable(dest_tab);
 
@@ -6951,11 +6949,9 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
       AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
       if (destTableIsFullAcid) {
         acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
+        //todo: should this be done for MM?  is it ok to use CombineHiveInputFormat with MM?
         checkAcidConstraints(qb, table_desc, dest_tab);
       }
-      if (AcidUtils.isInsertOnlyTable(dest_part.getTable().getParameters())) {
-        acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
-      }
       if (isMmTable) {
         txnId = SessionState.get().getTxnMgr().getCurrentTxnId();
       } else {
@@ -6966,7 +6962,7 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
       // For Acid table, Insert Overwrite shouldn't replace the table content. 
We keep the old
       // deltas and base and leave them up to the cleaner to clean up
       LoadFileType loadType = 
(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
-              dest_tab.getTableName()) && !destTableIsAcid) // // Both 
Full-acid and MM tables are excluded.
+              dest_tab.getTableName()) && !destTableIsTransactional) // // 
Both Full-acid and MM tables are excluded.
               ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING;
       ltd.setLoadFileType(loadType);
       ltd.setLbCtx(lbCtx);
@@ -7039,8 +7035,8 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
         viewDesc.setSchema(new ArrayList<FieldSchema>(field_schemas));
       }
 
-      destTableIsAcid = tblDesc != null && AcidUtils.isAcidTable(tblDesc);
-      destTableIsFullAcid = tblDesc != null && 
AcidUtils.isFullAcidTable(tblDesc);
+      destTableIsTransactional = tblDesc != null && 
AcidUtils.isTransactionalTable(tblDesc);
+      destTableIsFullAcid = tblDesc != null && AcidUtils.isAcidTable(tblDesc);
 
       boolean isDestTempFile = true;
       if (!ctx.isMRTmpFileURI(dest_path.toUri().toString())) {
@@ -7053,7 +7049,10 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
       boolean isDfsDir = (dest_type.intValue() == QBMetaData.DEST_DFS_FILE);
       // Create LFD even for MM CTAS - it's a no-op move, but it still seems 
to be used for stats.
       loadFileWork.add(new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, 
dest_path, isDfsDir, cols,
-          colTypes, destTableIsAcid ? Operation.INSERT : Operation.NOT_ACID, 
isMmCtas));
+          colTypes,
+        destTableIsFullAcid ? //there is a change here - prev version had 'transactional', one before 'acid'
+            Operation.INSERT : Operation.NOT_ACID,
+          isMmCtas));
       if (tblDesc == null) {
         if (viewDesc != null) {
           table_desc = PlanUtils.getTableDesc(viewDesc, cols, colTypes);
@@ -7140,7 +7139,7 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
     }
 
     FileSinkDesc fileSinkDesc = createFileSinkDesc(dest, table_desc, dest_part,
-        dest_path, currentTableId, destTableIsAcid, destTableIsTemporary,
+        dest_path, currentTableId, destTableIsFullAcid, 
destTableIsTemporary,//this was 1/4 acid
         destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS,
         canBeMerged, dest_tab, txnId, isMmCtas, dest_type, qb);
     if (isMmCtas) {
@@ -7495,11 +7494,6 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
     return colName;
   }
 
-  // Check constraints on acid tables.  This includes
-  // * Check that the table is bucketed
-  // * Check that the table is not sorted
-  // This method assumes you have already decided that this is an Acid write.  
Don't call it if
-  // that isn't true.
   private void checkAcidConstraints(QB qb, TableDesc tableDesc,
                                     Table table) throws SemanticException {
     /*
@@ -7512,10 +7506,6 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
     backwards incompatible.
     */
     conf.set(AcidUtils.CONF_ACID_KEY, "true");
-
-    if (table.getSortCols() != null && table.getSortCols().size() > 0) {
-      throw new SemanticException(ErrorMsg.ACID_NO_SORTED_BUCKETS, 
table.getTableName());
-    }
   }
 
   /**
@@ -12123,8 +12113,8 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
       if (p != null) {
         tbl = p.getTable();
       }
-      if (tbl != null && (AcidUtils.isFullAcidTable(tbl) || 
AcidUtils.isInsertOnlyTable(tbl.getParameters()))) {
-        acidInQuery = true;
+      if (tbl != null && AcidUtils.isTransactionalTable(tbl)) {
+        transactionalInQuery = true;
         checkAcidTxnManager(tbl);
       }
     }
@@ -12186,8 +12176,8 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
         tbl = writeEntity.getTable();
       }
 
-      if (tbl != null && (AcidUtils.isFullAcidTable(tbl) || 
AcidUtils.isInsertOnlyTable(tbl.getParameters()))) {
-        acidInQuery = true;
+      if (tbl != null && AcidUtils.isTransactionalTable(tbl)) {
+        transactionalInQuery = true;
         checkAcidTxnManager(tbl);
       }
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
index 7ecd1ff..075aac5 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
@@ -175,7 +175,7 @@ public class UpdateDeleteSemanticAnalyzer extends 
SemanticAnalyzer {
     }
     if(!foundColumnInTargetTable) {
       throw new 
SemanticException(ErrorMsg.INVALID_TARGET_COLUMN_IN_SET_CLAUSE, 
colName.getText(),
-        getDotName(new String[] {targetTable.getDbName(), 
targetTable.getTableName()}));
+        targetTable.getFullyQualifiedName());
     }
   }
   private ASTNode findLHSofAssignment(ASTNode assignment) {
@@ -318,7 +318,7 @@ public class UpdateDeleteSemanticAnalyzer extends 
SemanticAnalyzer {
   private void validateTargetTable(Table mTable) throws SemanticException {
     if (mTable.getTableType() == TableType.VIRTUAL_VIEW ||
       mTable.getTableType() == TableType.MATERIALIZED_VIEW) {
-        LOG.error("Table " + getDotName(new String[] {mTable.getDbName(), 
mTable.getTableName()}) + " is a view or materialized view");
+        LOG.error("Table " + mTable.getFullyQualifiedName() + " is a view or 
materialized view");
         throw new SemanticException(ErrorMsg.UPDATE_DELETE_VIEW.getMsg());
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java 
b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java
index 9477df6..6017137 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java
@@ -35,10 +35,10 @@ public class LoadDesc implements Serializable {
    * Need to remember whether this is an acid compliant operation, and if so 
whether it is an
    * insert, update, or delete.
    */
-  final AcidUtils.Operation writeType;
+  private final AcidUtils.Operation writeType;
 
 
-  public LoadDesc(final Path sourcePath, AcidUtils.Operation writeType) {
+  LoadDesc(final Path sourcePath, AcidUtils.Operation writeType) {
     this.sourcePath = sourcePath;
     this.writeType = writeType;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java 
b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
index 8d966c7..6e15234 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
@@ -135,7 +135,7 @@ public class TableScanDesc extends AbstractOperatorDesc 
implements IStatsGatherD
     this.alias = alias;
     this.virtualCols = vcs;
     this.tableMetadata = tblMetadata;
-    isAcidTable = AcidUtils.isFullAcidTable(this.tableMetadata);
+    isAcidTable = AcidUtils.isAcidTable(this.tableMetadata);
     if (isAcidTable) {
       acidOperationalProperties = 
AcidUtils.getAcidOperationalProperties(this.tableMetadata);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
index e8d3184..6bba6b0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
@@ -48,7 +48,7 @@ public abstract class Partish {
   // rename
   @Deprecated
   public final boolean isAcid() {
-    return AcidUtils.isFullAcidTable(getTable());
+    return AcidUtils.isAcidTable(getTable());
   }
 
   public abstract Table getTable();

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index a804527..b9077d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -364,7 +364,7 @@ public class CompactorMR {
     }
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, colNames.toString());
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, colTypes.toString());
-    HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
+    HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true);
     HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName());
   }
 

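Note: the compactor hunk above is the first of several that swap HIVE_TRANSACTIONAL_TABLE_SCAN for the renamed HIVE_ACID_TABLE_SCAN flag. A minimal sketch of the two equivalent ways the patch sets it, modelled on the test hunks that follow; the standalone JobConf setup is illustrative:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
    import org.apache.hadoop.hive.ql.io.AcidUtils;
    import org.apache.hadoop.mapred.JobConf;

    public class AcidScanConfSketch {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        // Renamed flag, set directly...
        HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true);
        // ...or through the renamed helper used in the ORC tests below.
        AcidUtils.setAcidTableScan(job, true);
        // Table-level markers, as set in the TestInputOutputFormat hunks below.
        job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
        job.set(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "default");
      }
    }
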
http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
index b98c74a..b0dcc7d 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
@@ -18,25 +18,16 @@
 package org.apache.hadoop.hive.ql;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.io.NullWritable;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
-import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
-import java.io.IOException;
 import java.util.List;
 
 /**
@@ -260,7 +251,12 @@ public class TestTxnLoadData extends TxnCommandsBaseForTests {
 
     String testQuery = isVectorized ? "select ROW__ID, a, b from T order by ROW__ID" :
       "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID";
-
+/*
+{"transactionid":0,"bucketid":536870912,"rowid":0}     0       2/000000_0
+{"transactionid":0,"bucketid":536870912,"rowid":1}     0       4/000000_0
+{"transactionid":24,"bucketid":536870912,"rowid":0}    4       4/delta_0000024_0000024_0000/000000_0
+{"transactionid":24,"bucketid":536870912,"rowid":1}    5       5/delta_0000024_0000024_0000/000000_0
+*/
     String[][] expected = new String[][] {
       //from pre-acid insert
      {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/000000_0"},

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index 5d26524..cec1d34 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -836,7 +836,7 @@ public class TestInputOutputFormat {
   public void testEtlCombinedStrategy() throws Exception {
     conf.set(HiveConf.ConfVars.HIVE_ORC_SPLIT_STRATEGY.varname, "ETL");
     conf.set(HiveConf.ConfVars.HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS.varname, "1000000");
-    AcidUtils.setTransactionalTableScan(conf, true);
+    AcidUtils.setAcidTableScan(conf, true);
     conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     conf.set(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "default");
 
@@ -2285,7 +2285,7 @@ public class TestInputOutputFormat {
 
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, BigRow.getColumnNamesProperty());
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, BigRow.getColumnTypesProperty());
-    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
+    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true);
 
     org.apache.hadoop.mapred.RecordReader<NullWritable, VectorizedRowBatch>
         reader = inputFormat.getRecordReader(splits[0], conf, Reporter.NULL);
@@ -3377,7 +3377,7 @@ public class TestInputOutputFormat {
   public void testACIDReaderNoFooterSerialize() throws Exception {
     MockFileSystem fs = new MockFileSystem(conf);
     MockPath mockPath = new MockPath(fs, "mock:///mocktable5");
-    conf.set("hive.transactional.table.scan", "true");
+    conf.set(ConfVars.HIVE_ACID_TABLE_SCAN.varname, "true");
     conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
@@ -3458,7 +3458,7 @@ public class TestInputOutputFormat {
   public void testACIDReaderFooterSerialize() throws Exception {
     MockFileSystem fs = new MockFileSystem(conf);
     MockPath mockPath = new MockPath(fs, "mock:///mocktable6");
-    conf.set("hive.transactional.table.scan", "true");
+    conf.set(ConfVars.HIVE_ACID_TABLE_SCAN.varname, "true");
     conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
@@ -3569,7 +3569,7 @@ public class TestInputOutputFormat {
 
     //set up props for read
     conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
-    AcidUtils.setTransactionalTableScan(conf, true);
+    AcidUtils.setAcidTableScan(conf, true);
 
     OrcInputFormat orcInputFormat = new OrcInputFormat();
     InputSplit[] splits = orcInputFormat.getSplits(conf, 2);
@@ -3648,7 +3648,7 @@ public class TestInputOutputFormat {
 
     //set up props for read
     conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
-    AcidUtils.setTransactionalTableScan(conf, true);
+    AcidUtils.setAcidTableScan(conf, true);
 
     OrcInputFormat orcInputFormat = new OrcInputFormat();
     InputSplit[] splits = orcInputFormat.getSplits(conf, 2);

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
index 030f012..56148d0 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.ql.io.orc;
 
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.io.BucketCodec;
 import org.apache.orc.CompressionKind;
@@ -67,10 +65,7 @@ import org.mockito.Mockito;
 import com.google.common.collect.Lists;
 
 import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
@@ -389,7 +384,7 @@ public class TestOrcRawRecordMerger {
     Configuration conf = new Configuration();
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "col1");
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "string");
-    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
+    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true);
     Reader reader = Mockito.mock(Reader.class, settings);
     RecordReader recordReader = Mockito.mock(RecordReader.class, settings);
 
@@ -607,7 +602,7 @@ public class TestOrcRawRecordMerger {
         OrcFile.readerOptions(conf));
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
-    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
+    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true);
     OrcRawRecordMerger merger =
         new OrcRawRecordMerger(conf, true, baseReader, false, BUCKET,
             createMaximalTxnList(), new Reader.Options(),
@@ -686,7 +681,7 @@ public class TestOrcRawRecordMerger {
 
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
-    AcidUtils.setTransactionalTableScan(conf,true);
+    AcidUtils.setAcidTableScan(conf,true);
     conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
 
     //the first "split" is for base/
@@ -1154,7 +1149,7 @@ public class TestOrcRawRecordMerger {
     JobConf job = new JobConf();
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, BigRow.getColumnNamesProperty());
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, BigRow.getColumnTypesProperty());
-    AcidUtils.setTransactionalTableScan(job,true);
+    AcidUtils.setAcidTableScan(job,true);
     job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     job.set("mapred.min.split.size", "1");
     job.set("mapred.max.split.size", "2");
@@ -1289,7 +1284,7 @@ public class TestOrcRawRecordMerger {
     job.set("mapred.input.dir", root.toString());
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, BigRow.getColumnNamesProperty());
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, BigRow.getColumnTypesProperty());
-    AcidUtils.setTransactionalTableScan(job,true);
+    AcidUtils.setAcidTableScan(job,true);
     job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     InputSplit[] splits = inf.getSplits(job, 5);
     //base has 10 rows, so 5 splits, 1 delta has 2 rows so 1 split, and 1 delta has 3 so 2 splits
@@ -1386,7 +1381,7 @@ public class TestOrcRawRecordMerger {
     job.set("bucket_count", "1");
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
-    AcidUtils.setTransactionalTableScan(job,true);
+    AcidUtils.setAcidTableScan(job,true);
     job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
     InputSplit[] splits = inf.getSplits(job, 5);
     assertEquals(2, splits.length);
@@ -1460,7 +1455,7 @@ public class TestOrcRawRecordMerger {
     job.set("bucket_count", "2");
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
     job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
-    AcidUtils.setTransactionalTableScan(job,true);
+    AcidUtils.setAcidTableScan(job,true);
     job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
 
     // read the keys before the delta is flushed

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
index 95e3463..3eb33da 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
@@ -92,7 +92,7 @@ public class TestVectorizedOrcAcidRowBatchReader {
     conf = new JobConf();
     conf.set("bucket_count", "1");
     conf.set(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
-    conf.setBoolean(HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname, true);
+    conf.setBoolean(HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN.varname, true);
     conf.set(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "default");
     conf.setInt(HiveConf.ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES.varname,
         AcidUtils.AcidOperationalProperties.getDefault().toInt());

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/queries/clientnegative/delete_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/delete_sorted.q b/ql/src/test/queries/clientnegative/delete_sorted.q
deleted file mode 100644
index 9f82c1f..0000000
--- a/ql/src/test/queries/clientnegative/delete_sorted.q
+++ /dev/null
@@ -1,7 +0,0 @@
-set hive.support.concurrency=true;
-set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-
-
-create table acid_insertsort(a int, b varchar(128)) partitioned by (ds string) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-
-delete from acid_insertsort where a = 3;

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/queries/clientnegative/insert_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_sorted.q b/ql/src/test/queries/clientnegative/insert_sorted.q
index cd1a69c..56bcb62 100644
--- a/ql/src/test/queries/clientnegative/insert_sorted.q
+++ b/ql/src/test/queries/clientnegative/insert_sorted.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
+create table mm_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true', "transactional_properties"="insert_only");
+insert into mm_insertsort values (1, '1'),(2, '2');
 
 create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-
-insert into table acid_insertsort select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/queries/clientnegative/insert_values_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_values_sorted.q b/ql/src/test/queries/clientnegative/insert_values_sorted.q
deleted file mode 100644
index ee26402..0000000
--- a/ql/src/test/queries/clientnegative/insert_values_sorted.q
+++ /dev/null
@@ -1,7 +0,0 @@
-set hive.support.concurrency=true;
-set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-
-
-create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-
-insert into table acid_insertsort values (1, 'abc'),(2, 'def');

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/queries/clientnegative/update_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_sorted.q b/ql/src/test/queries/clientnegative/update_sorted.q
deleted file mode 100644
index f9e5db5..0000000
--- a/ql/src/test/queries/clientnegative/update_sorted.q
+++ /dev/null
@@ -1,7 +0,0 @@
-set hive.support.concurrency=true;
-set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-
-
-create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-
-update acid_insertsort set b = 'fred' where b = 'bob';

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/results/clientnegative/delete_sorted.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/delete_sorted.q.out b/ql/src/test/results/clientnegative/delete_sorted.q.out
deleted file mode 100644
index 0d248d0..0000000
--- a/ql/src/test/results/clientnegative/delete_sorted.q.out
+++ /dev/null
@@ -1,9 +0,0 @@
-PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) partitioned by (ds string) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@acid_insertsort
-POSTHOOK: query: create table acid_insertsort(a int, b varchar(128)) partitioned by (ds string) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@acid_insertsort
-FAILED: SemanticException [Error 10298]: ACID insert, update, delete not supported on tables that are sorted, table acid_insertsort

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/results/clientnegative/insert_sorted.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/insert_sorted.q.out b/ql/src/test/results/clientnegative/insert_sorted.q.out
index 50dd5eb..0b1d253 100644
--- a/ql/src/test/results/clientnegative/insert_sorted.q.out
+++ b/ql/src/test/results/clientnegative/insert_sorted.q.out
@@ -1,9 +1,21 @@
-PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: query: create table mm_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true', "transactional_properties"="insert_only")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@acid_insertsort
-POSTHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: Output: default@mm_insertsort
+POSTHOOK: query: create table mm_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true', "transactional_properties"="insert_only")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@acid_insertsort
-FAILED: SemanticException [Error 10298]: ACID insert, update, delete not supported on tables that are sorted, table acid_insertsort
+POSTHOOK: Output: default@mm_insertsort
+PREHOOK: query: insert into mm_insertsort values (1, '1'),(2, '2')
+PREHOOK: type: QUERY
+PREHOOK: Output: default@mm_insertsort
+POSTHOOK: query: insert into mm_insertsort values (1, '1'),(2, '2')
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@mm_insertsort
+POSTHOOK: Lineage: mm_insertsort.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: mm_insertsort.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_insertsort
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table default.acid_insertsort cannot support full ACID functionality since it is sorted.)

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/results/clientnegative/insert_values_sorted.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/insert_values_sorted.q.out b/ql/src/test/results/clientnegative/insert_values_sorted.q.out
deleted file mode 100644
index 50dd5eb..0000000
--- a/ql/src/test/results/clientnegative/insert_values_sorted.q.out
+++ /dev/null
@@ -1,9 +0,0 @@
-PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@acid_insertsort
-POSTHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@acid_insertsort
-FAILED: SemanticException [Error 10298]: ACID insert, update, delete not supported on tables that are sorted, table acid_insertsort

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/results/clientnegative/update_sorted.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/update_sorted.q.out b/ql/src/test/results/clientnegative/update_sorted.q.out
deleted file mode 100644
index 50dd5eb..0000000
--- a/ql/src/test/results/clientnegative/update_sorted.q.out
+++ /dev/null
@@ -1,9 +0,0 @@
-PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@acid_insertsort
-POSTHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@acid_insertsort
-FAILED: SemanticException [Error 10298]: ACID insert, update, delete not supported on tables that are sorted, table acid_insertsort

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/results/clientpositive/llap/mm_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out
index 0374ea6..767ab47 100644
--- a/ql/src/test/results/clientpositive/llap/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out
@@ -138,7 +138,6 @@ STAGE PLANS:
               output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
               serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.part_mm
-          Write Type: INSERT
           micromanaged table: true
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/ql/src/test/results/clientpositive/mm_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mm_all.q.out b/ql/src/test/results/clientpositive/mm_all.q.out
index 34fd899..66597c0 100644
--- a/ql/src/test/results/clientpositive/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/mm_all.q.out
@@ -135,7 +135,6 @@ STAGE PLANS:
               output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
               serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.part_mm
-          Write Type: INSERT
           micromanaged table: true
 
   Stage: Stage-2

http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
index 1512ffb..f849b1a 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
@@ -54,7 +54,7 @@ public class AcidEventListener extends MetaStoreEventListener {
 
   @Override
   public void onDropTable(DropTableEvent tableEvent)  throws MetaException {
-    if (TxnUtils.isAcidTable(tableEvent.getTable())) {
+    if (TxnUtils.isTransactionalTable(tableEvent.getTable())) {
       txnHandler = getTxnHandler();
       txnHandler.cleanupRecords(HiveObjectType.TABLE, null, tableEvent.getTable(), null);
     }
@@ -62,7 +62,7 @@ public class AcidEventListener extends MetaStoreEventListener {
 
   @Override
   public void onDropPartition(DropPartitionEvent partitionEvent)  throws MetaException {
-    if (TxnUtils.isAcidTable(partitionEvent.getTable())) {
+    if (TxnUtils.isTransactionalTable(partitionEvent.getTable())) {
       txnHandler = getTxnHandler();
       txnHandler.cleanupRecords(HiveObjectType.PARTITION, null, partitionEvent.getTable(),
           partitionEvent.getPartitionIterator());
@@ -76,7 +76,7 @@ public class AcidEventListener extends MetaStoreEventListener {
     boolean origConcurrency = false;
 
     // Since TxnUtils.getTxnStore calls TxnHandler.setConf -> checkQFileTestHack -> TxnDbUtil.setConfValues,
-    // which may change the values of below two entries, we need to avoid pulluting the original values
+    // which may change the values of below two entries, we need to avoid polluting the original values
     if (hackOn) {
       origTxnMgr = MetastoreConf.getVar(conf, ConfVars.HIVE_TXN_MANAGER);
       origConcurrency = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY);

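Note: in the metastore hunks above, the broad check is now TxnUtils.isTransactionalTable(), so drop-table and drop-partition cleanup fires for insert-only tables too, while TxnUtils.isAcidTable() keeps the narrow full-ACID meaning. A small sketch of the split, using only the helpers visible in this diff; the wrapper method names are illustrative:

    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.txn.TxnUtils;

    public class TxnCheckSketch {
      // Broad: any transactional table (full ACID or insert-only) needs
      // its txn metadata cleaned up when dropped.
      static boolean needsTxnCleanupOnDrop(Table t) {
        return TxnUtils.isTransactionalTable(t);
      }

      // Narrow: only full-ACID tables support update/delete.
      static boolean supportsUpdateDelete(Table t) {
        return TxnUtils.isAcidTable(t);
      }
    }
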
http://git-wip-us.apache.org/repos/asf/hive/blob/9efed65a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
index da10313..c9ee688 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreEventContext;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -142,7 +143,7 @@ public final class TransactionalValidationListener extends MetaStorePreEventList
       }
 
       if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
-        throw new MetaException(getTableName(newTable) +
+        throw new MetaException(Warehouse.getQualifiedName(newTable) +
            " cannot be declared transactional because it's an external table");
       }
       validateTableStructure(context.getHandler(), newTable);
@@ -182,6 +183,17 @@ public final class TransactionalValidationListener extends MetaStorePreEventList
         }
       }
     }
+    checkSorted(newTable);
+  }
+  private void checkSorted(Table newTable) throws MetaException {
+    if(!TxnUtils.isAcidTable(newTable)) {
+      return;
+    }
+    StorageDescriptor sd = newTable.getSd();
+    if (sd.getSortCols() != null && sd.getSortCols().size() > 0) {
+      throw new MetaException("Table " + Warehouse.getQualifiedName(newTable)
+        + " cannot support full ACID functionality since it is sorted.");
+    }
   }
 
   /**
@@ -231,7 +243,7 @@ public final class TransactionalValidationListener extends MetaStorePreEventList
       }
 
       if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
-        throw new MetaException(newTable.getDbName() + "." + newTable.getTableName() +
+        throw new MetaException(Warehouse.getQualifiedName(newTable) +
            " cannot be declared transactional because it's an external table");
       }
 
@@ -241,9 +253,9 @@ public final class TransactionalValidationListener extends MetaStorePreEventList
         normazlieTransactionalPropertyDefault(newTable);
       }
       initializeTransactionalProperties(newTable);
+      checkSorted(newTable);
       return;
     }
-
     // transactional is found, but the value is not in expected range
     throw new MetaException("'transactional' property of TBLPROPERTIES may only have value 'true'");
   }
@@ -366,18 +378,16 @@ public final class TransactionalValidationListener extends MetaStorePreEventList
           );
         if (!validFile) {
-          throw new IllegalStateException("Unexpected data file name format.  Cannot convert " +
-            getTableName(table) + " to transactional table.  File: " + fileStatus.getPath());
+            Warehouse.getQualifiedName(table) + " to transactional table.  File: "
+            + fileStatus.getPath());
         }
       }
     } catch (IOException|NoSuchObjectException e) {
-      String msg = "Unable to list files for " + getTableName(table);
+      String msg = "Unable to list files for " + Warehouse.getQualifiedName(table);
       LOG.error(msg, e);
       MetaException e1 = new MetaException(msg);
       e1.initCause(e);
       throw e1;
     }
   }
-  private static String getTableName(Table table) {
-    return table.getDbName() + "." + table.getTableName();
-  }
 }

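Note: the new checkSorted() above is what turns the sorted-table failures in the .q.out changes from a parse-time SemanticException into a create-time MetaException: full-ACID tables may not declare SORTED BY, while insert-only tables may. A minimal, self-contained sketch of the same condition against a hand-built metastore Table; the field values are illustrative, and the behavior of TxnUtils.isAcidTable() on a table without transactional_properties is assumed to default to full ACID:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.Order;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.txn.TxnUtils;

    public class CheckSortedSketch {
      // Mirrors the private checkSorted() added above.
      static void checkSorted(Table newTable) throws MetaException {
        if (!TxnUtils.isAcidTable(newTable)) {
          return;   // insert-only and non-transactional tables pass
        }
        StorageDescriptor sd = newTable.getSd();
        if (sd.getSortCols() != null && !sd.getSortCols().isEmpty()) {
          throw new MetaException("Table " + Warehouse.getQualifiedName(newTable)
              + " cannot support full ACID functionality since it is sorted.");
        }
      }

      public static void main(String[] args) throws MetaException {
        Table t = new Table();                             // illustrative definition
        t.setDbName("default");
        t.setTableName("acid_insertsort");
        StorageDescriptor sd = new StorageDescriptor();
        sd.setSortCols(Arrays.asList(new Order("b", 1)));  // sorted by (b)
        t.setSd(sd);
        t.putToParameters("transactional", "true");        // full ACID, not insert_only
        checkSorted(t);   // expected to throw, matching insert_sorted.q.out above
      }
    }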