HIVE-14671 : Merge branch 'master' into hive-14535 (Wei Zheng)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/52e0f8f3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/52e0f8f3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/52e0f8f3

Branch: refs/heads/hive-14535
Commit: 52e0f8f34076fda56888b773f9e603323d7a0fc5
Parents: 21c209e 2fa4dc2
Author: Wei Zheng <w...@apache.org>
Authored: Thu May 25 13:48:45 2017 -0700
Committer: Wei Zheng <w...@apache.org>
Committed: Thu May 25 13:48:45 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hive/common/jsonexplain/Op.java      |     4 +-
 .../hadoop/hive/common/jsonexplain/TestOp.java  |    81 +
 .../hive/common/jsonexplain/TestStage.java      |   194 +
 .../hive/common/jsonexplain/TestVertex.java     |   108 +
 .../jsonexplain/tez/TestTezJsonParser.java      |    53 +
 errata.txt                                      |     3 +-
 .../listener/DummyRawStoreFailEvent.java        |    28 +-
 .../hive/ql/parse/TestReplicationScenarios.java |   171 +-
 .../org/apache/hive/jdbc/TestJdbcDriver2.java   |     2 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |     2 +
 .../hive/llap/daemon/impl/LlapDaemon.java       |     4 +-
 metastore/if/hive_metastore.thrift              |    61 +-
 .../upgrade/derby/042-HIVE-16575.derby.sql      |     4 +
 .../upgrade/derby/hive-schema-3.0.0.derby.sql   |     4 +-
 .../derby/upgrade-2.3.0-to-3.0.0.derby.sql      |     2 +-
 .../upgrade/hive/hive-schema-3.0.0.hive.sql     |    78 +-
 .../upgrade/mssql/027-HIVE-16575.mssql.sql      |     1 +
 .../upgrade/mssql/hive-schema-3.0.0.mssql.sql   |     2 +
 .../mssql/upgrade-2.3.0-to-3.0.0.mssql.sql      |     1 +
 .../upgrade/mysql/042-HIVE-16575.mysql.sql      |     1 +
 .../upgrade/mysql/hive-schema-3.0.0.mysql.sql   |     2 +
 .../mysql/upgrade-2.3.0-to-3.0.0.mysql.sql      |     1 +
 .../upgrade/oracle/042-HIVE-16575.oracle.sql    |     1 +
 .../upgrade/oracle/hive-schema-3.0.0.oracle.sql |     2 +
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |     1 +
 .../postgres/041-HIVE-16575.postgres.sql        |     1 +
 .../postgres/hive-schema-3.0.0.postgres.sql     |     2 +
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |     1 +
 .../metastore/hbase/HbaseMetastoreProto.java    |  4700 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  |  5850 ++-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |   584 +-
 .../ThriftHiveMetastore_server.skeleton.cpp     |    22 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  7378 +--
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   436 +
 .../hive/metastore/api/AbortTxnsRequest.java    |    32 +-
 .../metastore/api/AddDynamicPartitions.java     |    32 +-
 .../metastore/api/AddForeignKeyRequest.java     |    36 +-
 .../api/AddNotNullConstraintRequest.java        |   443 +
 .../metastore/api/AddPartitionsRequest.java     |    36 +-
 .../hive/metastore/api/AddPartitionsResult.java |    36 +-
 .../metastore/api/AddPrimaryKeyRequest.java     |    36 +-
 .../api/AddUniqueConstraintRequest.java         |   443 +
 .../metastore/api/ClearFileMetadataRequest.java |    32 +-
 .../hive/metastore/api/ClientCapabilities.java  |    32 +-
 .../hive/metastore/api/CompactionRequest.java   |    44 +-
 .../metastore/api/DropPartitionsResult.java     |    36 +-
 .../hive/metastore/api/FireEventRequest.java    |    32 +-
 .../hadoop/hive/metastore/api/Function.java     |    36 +-
 .../metastore/api/GetAllFunctionsResponse.java  |    36 +-
 .../api/GetFileMetadataByExprRequest.java       |    32 +-
 .../api/GetFileMetadataByExprResult.java        |    48 +-
 .../metastore/api/GetFileMetadataRequest.java   |    32 +-
 .../metastore/api/GetFileMetadataResult.java    |    44 +-
 .../metastore/api/GetOpenTxnsInfoResponse.java  |    36 +-
 .../hive/metastore/api/GetOpenTxnsResponse.java |    32 +-
 .../hive/metastore/api/GetTablesRequest.java    |    32 +-
 .../hive/metastore/api/GetTablesResult.java     |    36 +-
 .../api/HeartbeatTxnRangeResponse.java          |    64 +-
 .../metastore/api/InsertEventRequestData.java   |    64 +-
 .../hadoop/hive/metastore/api/LockRequest.java  |    36 +-
 .../api/NotNullConstraintsRequest.java          |   490 +
 .../api/NotNullConstraintsResponse.java         |   443 +
 .../api/NotificationEventResponse.java          |    36 +-
 .../hive/metastore/api/OpenTxnsResponse.java    |    32 +-
 .../metastore/api/PartitionsByExprResult.java   |    36 +-
 .../metastore/api/PartitionsStatsRequest.java   |    64 +-
 .../metastore/api/PartitionsStatsResult.java    |    76 +-
 .../metastore/api/PutFileMetadataRequest.java   |    64 +-
 .../hive/metastore/api/RequestPartsSpec.java    |    68 +-
 .../metastore/api/SQLNotNullConstraint.java     |  1005 +
 .../hive/metastore/api/SQLUniqueConstraint.java |  1103 +
 .../hive/metastore/api/ShowCompactResponse.java |    36 +-
 .../hive/metastore/api/ShowLocksResponse.java   |    36 +-
 .../hive/metastore/api/TableStatsRequest.java   |    32 +-
 .../hive/metastore/api/TableStatsResult.java    |    36 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 43424 +++++++++--------
 .../metastore/api/UniqueConstraintsRequest.java |   490 +
 .../api/UniqueConstraintsResponse.java          |   443 +
 .../gen-php/metastore/ThriftHiveMetastore.php   |  2554 +-
 .../src/gen/thrift/gen-php/metastore/Types.php  |  9511 ++--
 .../hive_metastore/ThriftHiveMetastore-remote   |    36 +-
 .../hive_metastore/ThriftHiveMetastore.py       |  2096 +-
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  2207 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   166 +
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   274 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |   128 +-
 .../hive/metastore/HiveMetaStoreClient.java     |    37 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |    21 +-
 .../hive/metastore/MetaStoreDirectSql.java      |   111 +-
 .../hadoop/hive/metastore/ObjectStore.java      |   324 +-
 .../apache/hadoop/hive/metastore/RawStore.java  |    15 +-
 .../hive/metastore/cache/CachedStore.java       |    37 +-
 .../hive/metastore/hbase/HBaseReadWrite.java    |    76 +-
 .../hadoop/hive/metastore/hbase/HBaseStore.java |   102 +-
 .../hadoop/hive/metastore/hbase/HBaseUtils.java |   131 +-
 .../hive/metastore/model/MConstraint.java       |     4 +
 .../metastore/hbase/hbase_metastore_proto.proto |    34 +
 .../DummyRawStoreControlledCommit.java          |    32 +-
 .../DummyRawStoreForJdoConnection.java          |    31 +-
 .../InjectableBehaviourObjectStore.java         |    20 +
 .../hive/metastore/hbase/TestHBaseStore.java    |   138 +-
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |     4 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |    78 +-
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |    19 +-
 .../apache/hadoop/hive/ql/hooks/ATSHook.java    |     4 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |     8 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |    75 +-
 .../hive/ql/metadata/NotNullConstraint.java     |    86 +
 .../hive/ql/metadata/UniqueConstraint.java      |   111 +
 .../formatting/JsonMetaDataFormatter.java       |    12 +-
 .../formatting/MetaDataFormatUtils.java         |    63 +-
 .../metadata/formatting/MetaDataFormatter.java  |     8 +-
 .../formatting/TextMetaDataFormatter.java       |    44 +-
 .../index/IndexWhereTaskDispatcher.java         |    34 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |   447 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |    85 +-
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |     1 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |   158 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |    31 +-
 .../hadoop/hive/ql/plan/AlterTableDesc.java     |    44 +-
 .../hadoop/hive/ql/plan/CreateTableDesc.java    |    41 +-
 .../hadoop/hive/ql/plan/ImportTableDesc.java    |     6 +-
 .../txn/compactor/HouseKeeperServiceBase.java   |     6 +-
 .../ql/TestTxnCommands2WithSplitUpdate.java     |    10 +-
 .../hadoop/hive/ql/exec/TestExplainTask.java    |   293 +-
 .../hadoop/hive/ql/hooks/TestATSHook.java       |    59 +
 .../hive/ql/parse/TestHiveDecimalParse.java     |     2 +-
 .../TestSQL11ReservedKeyWordsNegative.java      |    13 +
 .../alter_table_constraint_duplicate_pk.q       |     2 +-
 .../alter_table_constraint_invalid_fk_col1.q    |     4 +-
 .../alter_table_constraint_invalid_fk_col2.q    |     4 +-
 .../alter_table_constraint_invalid_fk_tbl1.q    |     4 +-
 .../alter_table_constraint_invalid_fk_tbl2.q    |     4 +-
 .../alter_table_constraint_invalid_pk_tbl.q     |     2 +-
 .../create_with_constraints_duplicate_name.q    |     4 +-
 .../create_with_constraints_enable.q            |     2 +-
 .../clientnegative/create_with_fk_constraint.q  |     2 +
 .../create_with_multi_pk_constraint.q           |     1 +
 .../clientnegative/drop_invalid_constraint1.q   |     2 +-
 .../clientnegative/drop_invalid_constraint2.q   |     2 +-
 .../clientnegative/drop_invalid_constraint3.q   |     2 +-
 .../clientnegative/drop_invalid_constraint4.q   |     4 +-
 .../clientpositive/create_with_constraints.q    |    84 +-
 .../alter_table_constraint_duplicate_pk.q.out   |     4 +-
 ...alter_table_constraint_invalid_fk_col1.q.out |     8 +-
 ...alter_table_constraint_invalid_fk_col2.q.out |     8 +-
 ...alter_table_constraint_invalid_fk_tbl1.q.out |     8 +-
 ...alter_table_constraint_invalid_fk_tbl2.q.out |     8 +-
 .../alter_table_constraint_invalid_pk_tbl.q.out |     4 +-
 ...create_with_constraints_duplicate_name.q.out |     6 +-
 .../create_with_constraints_enable.q.out        |     2 +-
 .../create_with_constraints_validate.q.out      |     2 +-
 .../create_with_fk_constraint.q.out             |    13 +
 .../create_with_multi_pk_constraint.q.out       |     1 +
 .../drop_invalid_constraint1.q.out              |     4 +-
 .../drop_invalid_constraint2.q.out              |     4 +-
 .../drop_invalid_constraint3.q.out              |     4 +-
 .../drop_invalid_constraint4.q.out              |     8 +-
 .../create_with_constraints.q.out               |  1241 +-
 .../clientpositive/llap/mm_conversions.q.out    |   135 +
 .../results/clientpositive/llap/sysdb.q.out     |   152 +-
 .../clientpositive/tez/explainuser_3.q.out      |     5 -
 .../hive/spark/client/SparkClientImpl.java      |    12 +-
 .../org/apache/hive/spark/client/rpc/Rpc.java   |     7 +-
 .../hive/spark/client/rpc/SaslHandler.java      |     2 +-
 165 files changed, 58646 insertions(+), 32439 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/if/hive_metastore.thrift
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --cc metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index e44cb9b,52bfb26..feea615
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@@ -6900,18 -7036,57 +6965,68 @@@ public class HiveMetaStore extends Thri
        return new ForeignKeysResponse(ret);
      }
  
 +    private void throwMetaException(Exception e) throws MetaException,
 +        NoSuchObjectException {
 +      if (e instanceof MetaException) {
 +        throw (MetaException) e;
 +      } else if (e instanceof NoSuchObjectException) {
 +        throw (NoSuchObjectException) e;
 +      } else {
 +        throw newMetaException(e);
 +      }
 +    }
 +
      @Override
+     public UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest request)
+         throws MetaException, NoSuchObjectException, TException {
+       String db_name = request.getDb_name();
+       String tbl_name = request.getTbl_name();
+       startTableFunction("get_unique_constraints", db_name, tbl_name);
+       List<SQLUniqueConstraint> ret = null;
+       Exception ex = null;
+       try {
+         ret = getMS().getUniqueConstraints(db_name, tbl_name);
+       } catch (Exception e) {
+         ex = e;
+         if (e instanceof MetaException) {
+           throw (MetaException) e;
+         } else if (e instanceof NoSuchObjectException) {
+           throw (NoSuchObjectException) e;
+         } else {
+           throw newMetaException(e);
+         }
+       } finally {
+         endFunction("get_unique_constraints", ret != null, ex, tbl_name);
+       }
+       return new UniqueConstraintsResponse(ret);
+     }
+ 
+     @Override
+     public NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsRequest request)
+         throws MetaException, NoSuchObjectException, TException {
+       String db_name = request.getDb_name();
+       String tbl_name = request.getTbl_name();
+       startTableFunction("get_not_null_constraints", db_name, tbl_name);
+       List<SQLNotNullConstraint> ret = null;
+       Exception ex = null;
+       try {
+         ret = getMS().getNotNullConstraints(db_name, tbl_name);
+       } catch (Exception e) {
+         ex = e;
+         if (e instanceof MetaException) {
+           throw (MetaException) e;
+         } else if (e instanceof NoSuchObjectException) {
+           throw (NoSuchObjectException) e;
+         } else {
+           throw newMetaException(e);
+         }
+       } finally {
+         endFunction("get_not_null_constraints", ret != null, ex, tbl_name);
+       }
+       return new NotNullConstraintsResponse(ret);
+     }
+ 
+     @Override
      public String get_metastore_db_uuid() throws MetaException, TException {
        try {
          return getMS().getMetastoreDbUuid();
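
Side note on the two getters added above: each inlines the same instanceof/rethrow chain that the new throwMetaException() helper in this hunk factors out. A possible follow-up cleanup (sketch only, not part of this commit) would delegate to the helper:

    @Override
    public UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest request)
        throws MetaException, NoSuchObjectException, TException {
      String db_name = request.getDb_name();
      String tbl_name = request.getTbl_name();
      startTableFunction("get_unique_constraints", db_name, tbl_name);
      List<SQLUniqueConstraint> ret = null;
      Exception ex = null;
      try {
        ret = getMS().getUniqueConstraints(db_name, tbl_name);
      } catch (Exception e) {
        ex = e;
        throwMetaException(e); // rethrows MetaException/NoSuchObjectException, wraps anything else
      } finally {
        endFunction("get_unique_constraints", ret != null, ex, tbl_name);
      }
      return new UniqueConstraintsResponse(ret);
    }

get_not_null_constraints() could be collapsed the same way.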

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index f67831e,617555e..0b7a031
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@@ -3994,238 -4009,41 +4002,249 @@@ public class DDLTask extends Task<DDLWo
      throw new HiveException(ErrorMsg.UNSUPPORTED_ALTER_TBL_OP, alterTbl.getOp().toString());
      }
  
 -    return 0;
 +    return null;
 +  }
 +
 +  private List<Task<?>> alterTableDropProps(AlterTableDesc alterTbl, Table tbl,
 +      Partition part, EnvironmentContext environmentContext) throws HiveException {
 +    if (StatsSetupConst.USER.equals(environmentContext.getProperties()
 +        .get(StatsSetupConst.STATS_GENERATED))) {
 +      // drop a stats parameter, which triggers recompute stats update automatically
 +      environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
 +    }
 +
 +    List<Task<?>> result = null;
 +    if (part == null) {
 +      Set<String> removedSet = alterTbl.getProps().keySet();
 +      boolean isFromMmTable = MetaStoreUtils.isInsertOnlyTable(tbl.getParameters()),
 +          isRemoved = MetaStoreUtils.isRemovedInsertOnlyTable(removedSet);
 +      if (isFromMmTable && isRemoved) {
 +        result = generateRemoveMmTasks(tbl);
 +      }
 +    }
 +    Iterator<String> keyItr = alterTbl.getProps().keySet().iterator();
 +    while (keyItr.hasNext()) {
 +      if (part != null) {
 +        part.getTPartition().getParameters().remove(keyItr.next());
 +      } else {
 +        tbl.getTTable().getParameters().remove(keyItr.next());
 +      }
 +    }
 +    return result;
 +  }
 +
 +  private List<Task<?>> generateRemoveMmTasks(Table tbl) throws HiveException {
 +    // To avoid confusion from nested MM directories when table is converted back and forth, we
 +    // want to rename mm_ dirs to remove the prefix; however, given the unpredictable nested
 +    // directory handling in Hive/MR, we will instead move all the files into the root directory.
 +    // We will also delete any directories that are not committed.
 +    // Note that this relies on locks. Note also that we only do the renames AFTER the metastore
 +    // operation commits. Deleting uncommitted things is safe, but moving stuff before we convert
 +    // could cause data loss.
 +    List<Path> allMmDirs = new ArrayList<>();
 +    if (tbl.isStoredAsSubDirectories()) {
 +      // TODO: support this? we only bail because it's a PITA and hardly anyone seems to care.
 +      throw new HiveException("Converting list bucketed tables stored as subdirectories "
 +          + " to and from MM is not supported");
 +    }
 +    List<String> bucketCols = tbl.getBucketCols();
 +    if (bucketCols != null && !bucketCols.isEmpty()
 +        && HiveConf.getBoolVar(conf, ConfVars.HIVE_STRICT_CHECKS_BUCKETING)) {
 +      throw new HiveException("Converting bucketed tables from MM is not 
supported by default; "
 +          + "copying files from multiple MM directories may potentially break 
the buckets. You "
 +          + "can set " + ConfVars.HIVE_STRICT_CHECKS_BUCKETING.varname
 +          + " to false for this query if you want to force the conversion.");
 +    }
 +    Hive db = getHive();
 +    String value = conf.get(ValidTxnList.VALID_TXNS_KEY);
 +    ValidTxnList validTxnList = value == null ? new ValidReadTxnList() : new ValidReadTxnList(value);
 +    if (tbl.getPartitionKeys().size() > 0) {
 +      PartitionIterable parts = new PartitionIterable(db, tbl, null,
 +          HiveConf.getIntVar(conf, ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
 +      Iterator<Partition> partIter = parts.iterator();
 +      while (partIter.hasNext()) {
 +        Partition part = partIter.next();
 +        checkMmLb(part);
 +        handleRemoveMm(part.getDataLocation(), validTxnList, allMmDirs);
 +      }
 +    } else {
 +      checkMmLb(tbl);
 +      handleRemoveMm(tbl.getDataLocation(), validTxnList, allMmDirs);
 +    }
 +    List<Path> targetPaths = new ArrayList<>(allMmDirs.size());
 +    List<String> targetPrefix = new ArrayList<>(allMmDirs.size());
 +    int prefixLen = JavaUtils.DELTA_PREFIX.length();
 +    for (int i = 0; i < allMmDirs.size(); ++i) {
 +      Path src = allMmDirs.get(i);
 +      Path tgt = src.getParent();
 +      String prefix = src.getName().substring(prefixLen + 1) + "_";
 +      Utilities.LOG14535.info("Will move " + src + " to " + tgt + " (prefix " 
+ prefix + ")");
 +      targetPaths.add(tgt);
 +      targetPrefix.add(prefix);
 +    }
 +    // Don't set inputs and outputs - the locks have already been taken so it's pointless.
 +    MoveWork mw = new MoveWork(null, null, null, null, false);
 +    mw.setMultiFilesDesc(new LoadMultiFilesDesc(
 +        allMmDirs, targetPaths, targetPrefix, true, null, null));
 +    return Lists.<Task<?>>newArrayList(TaskFactory.get(mw, conf));
 +  }
 +
 +  private void checkMmLb(Table tbl) throws HiveException {
 +    if (!tbl.isStoredAsSubDirectories()) return;
 +    // TODO: support this?
 +    throw new HiveException("Converting list bucketed tables stored as 
subdirectories "
 +        + " to and from MM is not supported");
 +  }
 +
 +  private void checkMmLb(Partition part) throws HiveException {
 +    if (!part.isStoredAsSubDirectories()) return;
 +    // TODO: support this?
 +    throw new HiveException("Converting list bucketed tables stored as 
subdirectories "
 +        + " to and from MM is not supported. Please create a table in the 
desired format.");
 +  }
 +
 +  private void handleRemoveMm(
 +      Path path, ValidTxnList validTxnList, List<Path> result) throws HiveException {
 +    // Note: doesn't take LB into account; that is not presently supported here (throws above).
 +    try {
 +      FileSystem fs = path.getFileSystem(conf);
 +      for (FileStatus file : fs.listStatus(path)) {
 +        Path childPath = file.getPath();
 +        if (!file.isDirectory()) {
 +          ensureDelete(fs, childPath, "a non-directory file");
 +          continue;
 +        }
 +        Long writeId = JavaUtils.extractTxnId(childPath);
 +        if (writeId == null) {
 +          ensureDelete(fs, childPath, "an unknown directory");
 +        } else if (!validTxnList.isTxnValid(writeId)) {
 +          // Assume no concurrent active writes - we rely on locks here. We could check and fail.
 +          ensureDelete(fs, childPath, "an uncommitted directory");
 +        } else {
 +          result.add(childPath);
 +        }
 +      }
 +    } catch (IOException ex) {
 +      throw new HiveException(ex);
 +    }
 +  }
 +
 +  private static void ensureDelete(FileSystem fs, Path path, String what) throws IOException {
 +    Utilities.LOG14535.info("Deleting " + what + " " + path);
 +    try {
 +      if (!fs.delete(path, true)) throw new IOException("delete returned 
false");
 +    } catch (Exception ex) {
 +      String error = "Couldn't delete " + path + "; cannot remove MM setting 
from the table";
 +      LOG.error(error, ex);
 +      throw (ex instanceof IOException) ? (IOException)ex : new 
IOException(ex);
 +    }
 +  }
 +
 +  private List<Task<?>> generateAddMmTasks(Table tbl) throws HiveException {
 +    // We will move all the files in the table/partition directories into the first MM
 +    // directory, then commit the first write ID.
 +    List<Path> srcs = new ArrayList<>(), tgts = new ArrayList<>();
 +    long mmWriteId = 0;
 +    try {
 +      HiveTxnManager txnManager = SessionState.get().getTxnMgr();
 +      mmWriteId = txnManager.openTxn(new Context(conf), conf.getUser());
 +      txnManager.commitTxn();
 +    } catch (Exception e) {
 +      String errorMessage = "FAILED: Error in acquiring locks: " + 
e.getMessage();
 +      console.printError(errorMessage, "\n"
 +          + org.apache.hadoop.util.StringUtils.stringifyException(e));
 +    }
 +    int stmtId = 0;
 +    String mmDir = AcidUtils.deltaSubdir(mmWriteId, mmWriteId, stmtId);
 +    Hive db = getHive();
 +    if (tbl.getPartitionKeys().size() > 0) {
 +      PartitionIterable parts = new PartitionIterable(db, tbl, null,
 +          HiveConf.getIntVar(conf, ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
 +      Iterator<Partition> partIter = parts.iterator();
 +      while (partIter.hasNext()) {
 +        Partition part = partIter.next();
 +        checkMmLb(part);
 +        Path src = part.getDataLocation(), tgt = new Path(src, mmDir);
 +        srcs.add(src);
 +        tgts.add(tgt);
 +        Utilities.LOG14535.info("Will move " + src + " to " + tgt);
 +      }
 +    } else {
 +      checkMmLb(tbl);
 +      Path src = tbl.getDataLocation(), tgt = new Path(src, mmDir);
 +      srcs.add(src);
 +      tgts.add(tgt);
 +      Utilities.LOG14535.info("Will move " + src + " to " + tgt);
 +    }
 +    // Don't set inputs and outputs - the locks have already been taken so it's pointless.
 +    MoveWork mw = new MoveWork(null, null, null, null, false);
 +    mw.setMultiFilesDesc(new LoadMultiFilesDesc(srcs, tgts, true, null, null));
 +    ImportCommitWork icw = new ImportCommitWork(tbl.getDbName(), tbl.getTableName(), mmWriteId, stmtId);
 +    Task<?> mv = TaskFactory.get(mw, conf), ic = TaskFactory.get(icw, conf);
 +    mv.addDependentTask(ic);
 +    return Lists.<Task<?>>newArrayList(mv);
 +  }
 +
 +  private List<Task<?>> alterTableAddProps(AlterTableDesc alterTbl, Table tbl,
 +      Partition part, EnvironmentContext environmentContext) throws HiveException {
 +    if (StatsSetupConst.USER.equals(environmentContext.getProperties()
 +        .get(StatsSetupConst.STATS_GENERATED))) {
 +      environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
 +    }
 +    if (alterTbl.getProps().containsKey(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY)) {
 +      NanoTimeUtils.validateTimeZone(
 +          alterTbl.getProps().get(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY));
 +    }
 +    List<Task<?>> result = null;
 +    if (part != null) {
 +      part.getTPartition().getParameters().putAll(alterTbl.getProps());
 +    } else {
 +      boolean isFromMmTable = MetaStoreUtils.isInsertOnlyTable(tbl.getParameters());
 +      Boolean isToMmTable = MetaStoreUtils.isToInsertOnlyTable(alterTbl.getProps());
 +      if (isToMmTable != null) {
 +        if (!isFromMmTable && isToMmTable) {
 +          result = generateAddMmTasks(tbl);
 +        } else if (isFromMmTable && !isToMmTable) {
 +          result = generateRemoveMmTasks(tbl);
 +        }
 +      }
 +      tbl.getTTable().getParameters().putAll(alterTbl.getProps());
 +    }
 +    return result;
    }
  
-    private int dropConstraint(Hive db, AlterTableDesc alterTbl)
-     throws SemanticException, HiveException {
-      try {
-       db.dropConstraint(Utilities.getDatabaseName(alterTbl.getOldName()),
-         Utilities.getTableName(alterTbl.getOldName()),
-           alterTbl.getConstraintName());
-       } catch (NoSuchObjectException e) {
-         throw new HiveException(e);
-       }
-      return 0;
-    }
- 
-    private int addConstraint(Hive db, AlterTableDesc alterTbl)
-     throws SemanticException, HiveException {
+   private int dropConstraint(Hive db, AlterTableDesc alterTbl)
+           throws SemanticException, HiveException {
      try {
-     // This is either an alter table add foreign key or add primary key command.
-     if (!alterTbl.getForeignKeyCols().isEmpty()) {
-        db.addForeignKey(alterTbl.getForeignKeyCols());
-      } else if (!alterTbl.getPrimaryKeyCols().isEmpty()) {
-        db.addPrimaryKey(alterTbl.getPrimaryKeyCols());
+      db.dropConstraint(Utilities.getDatabaseName(alterTbl.getOldName()),
+        Utilities.getTableName(alterTbl.getOldName()),
+          alterTbl.getConstraintName());
+      } catch (NoSuchObjectException e) {
+        throw new HiveException(e);
       }
+     return 0;
+   }
+ 
+   private int addConstraints(Hive db, AlterTableDesc alterTbl)
+            throws SemanticException, HiveException {
+     try {
+       // This is either an alter table add foreign key or add primary key command.
+       if (alterTbl.getForeignKeyCols() != null
+               && !alterTbl.getForeignKeyCols().isEmpty()) {
+         db.addForeignKey(alterTbl.getForeignKeyCols());
+       }
+       if (alterTbl.getPrimaryKeyCols() != null
+               && !alterTbl.getPrimaryKeyCols().isEmpty()) {
+         db.addPrimaryKey(alterTbl.getPrimaryKeyCols());
+       }
+       if (alterTbl.getUniqueConstraintCols() != null
+               && !alterTbl.getUniqueConstraintCols().isEmpty()) {
+         db.addUniqueConstraint(alterTbl.getUniqueConstraintCols());
+       }
+       if (alterTbl.getNotNullConstraintCols() != null
+               && !alterTbl.getNotNullConstraintCols().isEmpty()) {
+         db.addNotNullConstraint(alterTbl.getNotNullConstraintCols());
+       }
      } catch (NoSuchObjectException e) {
        throw new HiveException(e);
      }
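
For reference, the delta directory that generateAddMmTasks() builds via AcidUtils.deltaSubdir(mmWriteId, mmWriteId, stmtId) has the shape seen in the updated test expectations below. A minimal sketch of that naming scheme, assuming the zero-padding widths the expectations show (7 digits for write IDs, 4 for the statement ID); this mirrors the observed output format, not the actual AcidUtils source:

    // Illustrative sketch of the delta directory naming visible in the test diffs.
    static String deltaSubdir(long minWriteId, long maxWriteId, int stmtId) {
      return String.format("delta_%07d_%07d_%04d", minWriteId, maxWriteId, stmtId);
    }
    // For an MM conversion, min == max == the single committed write ID:
    // deltaSubdir(24, 24, 0) returns "delta_0000024_0000024_0000".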

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index e723e2f,da00bb3..f325c0e
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@@ -345,11 -344,8 +345,11 @@@ public class AcidUtils 
      public static final String SPLIT_UPDATE_STRING = "split_update";
      public static final int HASH_BASED_MERGE_BIT = 0x02;
      public static final String HASH_BASED_MERGE_STRING = "hash_merge";
-     public static final int INSERT_ONLY_BIT = 0x03;
++    public static final int INSERT_ONLY_BIT = 0x04;
 +    public static final String INSERT_ONLY_STRING = "insert_only";
      public static final String DEFAULT_VALUE_STRING = TransactionalValidationListener.DEFAULT_TRANSACTIONAL_PROPERTY;
      public static final String LEGACY_VALUE_STRING = TransactionalValidationListener.LEGACY_TRANSACTIONAL_PROPERTY;
 +    public static final String INSERTONLY_VALUE_STRING = TransactionalValidationListener.INSERTONLY_TRANSACTIONAL_PROPERTY;
  
      private AcidOperationalProperties() {
      }
@@@ -374,17 -370,6 +374,18 @@@
        AcidOperationalProperties obj = new AcidOperationalProperties();
        obj.setSplitUpdate(true);
        obj.setHashBasedMerge(false);
++      obj.setInsertOnly(false);
 +      return obj;
 +    }
 +
 +    /**
 +     * Returns an acidOperationalProperties object for tables that use the ACID framework but
 +     * only support the INSERT operation and do not require ORC or bucketing
 +     * @return the acidOperationalProperties object
 +     */
 +    public static AcidOperationalProperties getInsertOnly() {
 +      AcidOperationalProperties obj = new AcidOperationalProperties();
 +      obj.setInsertOnly(true);
        return obj;
      }
  
@@@ -417,6 -399,6 +418,8 @@@
            case HASH_BASED_MERGE_STRING:
              obj.setHashBasedMerge(true);
              break;
++          case INSERT_ONLY_STRING:
++            obj.setInsertOnly(true);
++            break;
            default:
              throw new IllegalArgumentException(
                  "Unexpected value " + option + " for ACID operational 
properties!");
@@@ -438,6 -420,6 +441,9 @@@
        if ((properties & HASH_BASED_MERGE_BIT)  > 0) {
          obj.setHashBasedMerge(true);
        }
++      if ((properties & INSERT_ONLY_BIT) > 0) {
++        obj.setInsertOnly(true);
++      }
        return obj;
      }
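
The INSERT_ONLY_BIT change from 0x03 to 0x04 above is load-bearing: each operational property is a bit flag, and 0x03 (binary 011) overlaps HASH_BASED_MERGE_BIT (0x02) plus the split-update bit, so a mask test could never tell insert-only apart from split_update combined with hash_merge. A small sketch of the flag test; SPLIT_UPDATE_BIT is assumed to be 0x01, as it is not visible in the hunk context:

    static final int SPLIT_UPDATE_BIT = 0x01;     // assumption; not shown in the diff context
    static final int HASH_BASED_MERGE_BIT = 0x02; // from the hunk above
    static final int INSERT_ONLY_BIT = 0x04;      // was 0x03 == SPLIT_UPDATE_BIT | HASH_BASED_MERGE_BIT

    static boolean isInsertOnly(int properties) {
      // Only correct because INSERT_ONLY_BIT is a distinct power of two.
      return (properties & INSERT_ONLY_BIT) > 0;
    }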
  

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index a16bf91,3f032c8..66c662f
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@@ -65,10 -62,8 +65,9 @@@ import org.apache.hadoop.fs.FileStatus
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.PathFilter;
- import org.apache.hadoop.hive.common.BlobStorageUtils;
  import org.apache.hadoop.hive.common.FileUtils;
  import org.apache.hadoop.hive.common.HiveStatsUtils;
 +import org.apache.hadoop.hive.common.JavaUtils;
  import org.apache.hadoop.hive.common.ObjectPair;
  import org.apache.hadoop.hive.common.StatsSetupConst;
  import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index e36623c,dee6a10..d5947bd
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@@ -41,9 -42,10 +42,11 @@@ import org.apache.hadoop.hive.metastore
  import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
  import org.apache.hadoop.hive.metastore.api.Order;
  import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
  import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
  import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
  import org.apache.hadoop.hive.ql.Driver;
  import org.apache.hadoop.hive.ql.ErrorMsg;
  import org.apache.hadoop.hive.ql.QueryState;

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
index 344893c,7b46fcd..f9314a7
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
@@@ -96,10 -98,8 +98,12 @@@ public class CreateTableDesc extends DD
    private boolean isCTAS = false;
    List<SQLPrimaryKey> primaryKeys;
    List<SQLForeignKey> foreignKeys;
+   List<SQLUniqueConstraint> uniqueConstraints;
+   List<SQLNotNullConstraint> notNullConstraints;
 +  private Long initialMmWriteId; // Initial MM write ID for CTAS and import.
 +  // The configuration for the FSOP (FileSinkOperator) that is going to write initial data during CTAS.
 +  // This is not needed beyond compilation, so it is transient.
 +  private transient FileSinkDesc writer;
  
    public CreateTableDesc() {
    }

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2WithSplitUpdate.java
----------------------------------------------------------------------
diff --cc ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2WithSplitUpdate.java
index ea5ecbc,ea5ecbc..d9be605
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2WithSplitUpdate.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2WithSplitUpdate.java
@@@ -444,11 -444,11 +444,11 @@@ public class TestTxnCommands2WithSplitU
         FileStatus[] buckets = fs.listStatus(status[i].getPath(), FileUtils.STAGING_DIR_PATH_FILTER);
          Arrays.sort(buckets);
          if (numDelta == 1) {
--          Assert.assertEquals("delta_0000022_0000022_0000", 
status[i].getPath().getName());
++          Assert.assertEquals("delta_0000024_0000024_0000", 
status[i].getPath().getName());
            Assert.assertEquals(BUCKET_COUNT - 1, buckets.length);
            Assert.assertEquals("bucket_00001", buckets[0].getPath().getName());
          } else if (numDelta == 2) {
--          Assert.assertEquals("delta_0000023_0000023_0000", 
status[i].getPath().getName());
++          Assert.assertEquals("delta_0000025_0000025_0000", 
status[i].getPath().getName());
            Assert.assertEquals(1, buckets.length);
            Assert.assertEquals("bucket_00001", buckets[0].getPath().getName());
          }
@@@ -457,7 -457,7 +457,7 @@@
         FileStatus[] buckets = fs.listStatus(status[i].getPath(), FileUtils.STAGING_DIR_PATH_FILTER);
          Arrays.sort(buckets);
          if (numDeleteDelta == 1) {
--          Assert.assertEquals("delete_delta_0000022_0000022_0000", 
status[i].getPath().getName());
++          Assert.assertEquals("delete_delta_0000024_0000024_0000", 
status[i].getPath().getName());
            Assert.assertEquals(BUCKET_COUNT - 1, buckets.length);
            Assert.assertEquals("bucket_00001", buckets[0].getPath().getName());
          }
@@@ -504,7 -504,7 +504,7 @@@
            Assert.assertEquals("bucket_00001", buckets[0].getPath().getName());
          } else if (numBase == 2) {
          // The new base dir now has two bucket files, since the delta dir has two bucket files
--          Assert.assertEquals("base_0000023", status[i].getPath().getName());
++          Assert.assertEquals("base_0000025", status[i].getPath().getName());
            Assert.assertEquals(1, buckets.length);
            Assert.assertEquals("bucket_00001", buckets[0].getPath().getName());
          }
@@@ -530,7 -530,7 +530,7 @@@
      status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
        (Table.NONACIDORCTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER);
      Assert.assertEquals(1, status.length);
--    Assert.assertEquals("base_0000023", status[0].getPath().getName());
++    Assert.assertEquals("base_0000025", status[0].getPath().getName());
     FileStatus[] buckets = fs.listStatus(status[0].getPath(), FileUtils.STAGING_DIR_PATH_FILTER);
      Arrays.sort(buckets);
      Assert.assertEquals(1, buckets.length);
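
(The expected IDs in this test move up by two -- delta 22/23 to 24/25, base 23 to 25 -- apparently because the MM conversion path added in DDLTask above opens and immediately commits a transaction to reserve the initial MM write ID, consuming transaction IDs before these statements run.)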

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/ql/src/test/results/clientpositive/llap/mm_conversions.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/llap/mm_conversions.q.out
index 861acaf,0000000..1610672
mode 100644,000000..100644
--- a/ql/src/test/results/clientpositive/llap/mm_conversions.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_conversions.q.out
@@@ -1,720 -1,0 +1,855 @@@
 +PREHOOK: query: drop table intermediate
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table intermediate
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@intermediate
 +POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@intermediate
 +PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 1
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@src
 +PREHOOK: Output: default@intermediate@p=455
 +POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 1
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@src
 +POSTHOOK: Output: default@intermediate@p=455
 +POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 +PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 1
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@src
 +PREHOOK: Output: default@intermediate@p=456
 +POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 1
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@src
 +POSTHOOK: Output: default@intermediate@p=456
 +POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 +PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 1
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@src
 +PREHOOK: Output: default@intermediate@p=457
 +POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 1
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@src
 +POSTHOOK: Output: default@intermediate@p=457
 +POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 +PREHOOK: query: drop table simple_from_mm1
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table simple_from_mm1
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table simple_from_mm1(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@simple_from_mm1
 +POSTHOOK: query: create table simple_from_mm1(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@simple_from_mm1
 +PREHOOK: query: insert into table simple_from_mm1 select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_from_mm1
 +POSTHOOK: query: insert into table simple_from_mm1 select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_from_mm1
 +POSTHOOK: Lineage: simple_from_mm1.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table simple_from_mm1 select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_from_mm1
 +POSTHOOK: query: insert into table simple_from_mm1 select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_from_mm1
 +POSTHOOK: Lineage: simple_from_mm1.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from simple_from_mm1 s1 order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_from_mm1
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_from_mm1 s1 order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_from_mm1
 +#### A masked pattern was here ####
++0
++0
++98
++98
++100
++100
 +PREHOOK: query: alter table simple_from_mm1 unset tblproperties('transactional_properties', 'transactional')
 +PREHOOK: type: ALTERTABLE_PROPERTIES
 +PREHOOK: Input: default@simple_from_mm1
 +PREHOOK: Output: default@simple_from_mm1
 +POSTHOOK: query: alter table simple_from_mm1 unset tblproperties('transactional_properties', 'transactional')
 +POSTHOOK: type: ALTERTABLE_PROPERTIES
 +POSTHOOK: Input: default@simple_from_mm1
 +POSTHOOK: Output: default@simple_from_mm1
 +PREHOOK: query: select * from simple_from_mm1 s2 order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_from_mm1
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_from_mm1 s2 order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_from_mm1
 +#### A masked pattern was here ####
++0
++0
++98
++98
++100
++100
 +PREHOOK: query: insert into table simple_from_mm1 select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_from_mm1
 +POSTHOOK: query: insert into table simple_from_mm1 select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_from_mm1
 +POSTHOOK: Lineage: simple_from_mm1.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from simple_from_mm1 s3 order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_from_mm1
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_from_mm1 s3 order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_from_mm1
 +#### A masked pattern was here ####
++0
++0
++0
++98
++98
++98
++100
++100
++100
 +PREHOOK: query: drop table simple_from_mm1
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@simple_from_mm1
 +PREHOOK: Output: default@simple_from_mm1
 +POSTHOOK: query: drop table simple_from_mm1
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@simple_from_mm1
 +POSTHOOK: Output: default@simple_from_mm1
 +PREHOOK: query: drop table simple_from_mm2
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table simple_from_mm2
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table simple_from_mm2(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@simple_from_mm2
 +POSTHOOK: query: create table simple_from_mm2(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@simple_from_mm2
 +PREHOOK: query: insert into table simple_from_mm2 select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_from_mm2
 +POSTHOOK: query: insert into table simple_from_mm2 select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_from_mm2
 +POSTHOOK: Lineage: simple_from_mm2.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table simple_from_mm2 select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_from_mm2
 +POSTHOOK: query: insert into table simple_from_mm2 select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_from_mm2
 +POSTHOOK: Lineage: simple_from_mm2.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from simple_from_mm2 s1 order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_from_mm2
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_from_mm2 s1 order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_from_mm2
 +#### A masked pattern was here ####
++0
++0
++98
++98
++100
++100
 +PREHOOK: query: alter table simple_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false')
 +PREHOOK: type: ALTERTABLE_PROPERTIES
 +PREHOOK: Input: default@simple_from_mm2
 +PREHOOK: Output: default@simple_from_mm2
 +POSTHOOK: query: alter table simple_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false')
 +POSTHOOK: type: ALTERTABLE_PROPERTIES
 +POSTHOOK: Input: default@simple_from_mm2
 +POSTHOOK: Output: default@simple_from_mm2
 +PREHOOK: query: select * from simple_from_mm2 s2 order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_from_mm2
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_from_mm2 s2 order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_from_mm2
 +#### A masked pattern was here ####
++0
++0
++98
++98
++100
++100
 +PREHOOK: query: insert into table simple_from_mm2 select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_from_mm2
 +POSTHOOK: query: insert into table simple_from_mm2 select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_from_mm2
 +POSTHOOK: Lineage: simple_from_mm2.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from simple_from_mm2 s3 order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_from_mm2
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_from_mm2 s3 order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_from_mm2
 +#### A masked pattern was here ####
++0
++0
++0
++98
++98
++98
++100
++100
++100
 +PREHOOK: query: drop table simple_from_mm2
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@simple_from_mm2
 +PREHOOK: Output: default@simple_from_mm2
 +POSTHOOK: query: drop table simple_from_mm2
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@simple_from_mm2
 +POSTHOOK: Output: default@simple_from_mm2
 +PREHOOK: query: drop table simple_to_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table simple_to_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table simple_to_mm(key int) stored as orc
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@simple_to_mm
 +POSTHOOK: query: create table simple_to_mm(key int) stored as orc
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@simple_to_mm
 +PREHOOK: query: insert into table simple_to_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_to_mm
 +POSTHOOK: query: insert into table simple_to_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_to_mm
 +POSTHOOK: Lineage: simple_to_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from simple_to_mm s1 order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_to_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_to_mm s1 order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_to_mm
 +#### A masked pattern was here ####
++0
++98
++100
 +PREHOOK: query: alter table simple_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: ALTERTABLE_PROPERTIES
 +PREHOOK: Input: default@simple_to_mm
 +PREHOOK: Output: default@simple_to_mm
 +FAILED: Error in acquiring locks: Transaction already opened. txnid:30
 +POSTHOOK: query: alter table simple_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: ALTERTABLE_PROPERTIES
 +POSTHOOK: Input: default@simple_to_mm
 +POSTHOOK: Output: default@simple_to_mm
 +PREHOOK: query: select * from simple_to_mm s2 order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_to_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_to_mm s2 order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_to_mm
 +#### A masked pattern was here ####
++0
++98
++100
 +PREHOOK: query: insert into table simple_to_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_to_mm
 +POSTHOOK: query: insert into table simple_to_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_to_mm
 +POSTHOOK: Lineage: simple_to_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table simple_to_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_to_mm
 +POSTHOOK: query: insert into table simple_to_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_to_mm
 +POSTHOOK: Lineage: simple_to_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from simple_to_mm s3 order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_to_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_to_mm s3 order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_to_mm
 +#### A masked pattern was here ####
++0
++0
++0
++98
++98
++98
++100
++100
++100
 +PREHOOK: query: drop table simple_to_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@simple_to_mm
 +PREHOOK: Output: default@simple_to_mm
 +POSTHOOK: query: drop table simple_to_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@simple_to_mm
 +POSTHOOK: Output: default@simple_to_mm
 +PREHOOK: query: drop table part_from_mm1
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table part_from_mm1
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table part_from_mm1(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@part_from_mm1
 +POSTHOOK: query: create table part_from_mm1(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@part_from_mm1
 +PREHOOK: query: insert into table part_from_mm1 partition(key_mm='455') select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_from_mm1@key_mm=455
 +POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='455') select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_from_mm1@key_mm=455
 +POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table part_from_mm1 partition(key_mm='455') select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_from_mm1@key_mm=455
 +POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='455') select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_from_mm1@key_mm=455
 +POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table part_from_mm1 partition(key_mm='456') select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_from_mm1@key_mm=456
 +POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='456') select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_from_mm1@key_mm=456
 +POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from part_from_mm1 s1 order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_from_mm1
 +PREHOOK: Input: default@part_from_mm1@key_mm=455
 +PREHOOK: Input: default@part_from_mm1@key_mm=456
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_from_mm1 s1 order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_from_mm1
 +POSTHOOK: Input: default@part_from_mm1@key_mm=455
 +POSTHOOK: Input: default@part_from_mm1@key_mm=456
 +#### A masked pattern was here ####
++0     455
++0     455
++0     456
++98    455
++98    455
++98    456
++100   455
++100   455
++100   456
 +PREHOOK: query: alter table part_from_mm1 unset tblproperties('transactional_properties', 'transactional')
 +PREHOOK: type: ALTERTABLE_PROPERTIES
 +PREHOOK: Input: default@part_from_mm1
 +PREHOOK: Output: default@part_from_mm1
 +POSTHOOK: query: alter table part_from_mm1 unset tblproperties('transactional_properties', 'transactional')
 +POSTHOOK: type: ALTERTABLE_PROPERTIES
 +POSTHOOK: Input: default@part_from_mm1
 +POSTHOOK: Output: default@part_from_mm1
 +PREHOOK: query: select * from part_from_mm1 s2 order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_from_mm1
 +PREHOOK: Input: default@part_from_mm1@key_mm=455
 +PREHOOK: Input: default@part_from_mm1@key_mm=456
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_from_mm1 s2 order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_from_mm1
 +POSTHOOK: Input: default@part_from_mm1@key_mm=455
 +POSTHOOK: Input: default@part_from_mm1@key_mm=456
 +#### A masked pattern was here ####
++0     455
++0     455
++0     456
++98    455
++98    455
++98    456
++100   455
++100   455
++100   456
 +PREHOOK: query: insert into table part_from_mm1 partition(key_mm='456') select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_from_mm1@key_mm=456
 +POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='456') select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_from_mm1@key_mm=456
 +POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table part_from_mm1 partition(key_mm='457') select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_from_mm1@key_mm=457
 +POSTHOOK: query: insert into table part_from_mm1 partition(key_mm='457') select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_from_mm1@key_mm=457
 +POSTHOOK: Lineage: part_from_mm1 PARTITION(key_mm=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from part_from_mm1 s3 order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_from_mm1
 +PREHOOK: Input: default@part_from_mm1@key_mm=455
 +PREHOOK: Input: default@part_from_mm1@key_mm=456
 +PREHOOK: Input: default@part_from_mm1@key_mm=457
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_from_mm1 s3 order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_from_mm1
 +POSTHOOK: Input: default@part_from_mm1@key_mm=455
 +POSTHOOK: Input: default@part_from_mm1@key_mm=456
 +POSTHOOK: Input: default@part_from_mm1@key_mm=457
 +#### A masked pattern was here ####
++0     455
++0     455
++0     456
++0     456
++0     457
++98    455
++98    455
++98    456
++98    456
++98    457
++100   455
++100   455
++100   456
++100   456
++100   457
 +PREHOOK: query: drop table part_from_mm1
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@part_from_mm1
 +PREHOOK: Output: default@part_from_mm1
 +POSTHOOK: query: drop table part_from_mm1
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@part_from_mm1
 +POSTHOOK: Output: default@part_from_mm1
 +PREHOOK: query: drop table part_from_mm2
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table part_from_mm2
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table part_from_mm2(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@part_from_mm2
 +POSTHOOK: query: create table part_from_mm2(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@part_from_mm2
 +PREHOOK: query: insert into table part_from_mm2 partition(key_mm='456') select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_from_mm2@key_mm=456
 +POSTHOOK: query: insert into table part_from_mm2 partition(key_mm='456') select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_from_mm2@key_mm=456
 +POSTHOOK: Lineage: part_from_mm2 PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: --fails here
 +insert into table part_from_mm2 partition(key_mm='455') select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_from_mm2@key_mm=455
 +POSTHOOK: query: --fails here
 +insert into table part_from_mm2 partition(key_mm='455') select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_from_mm2@key_mm=455
 +POSTHOOK: Lineage: part_from_mm2 PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from part_from_mm2 s1 order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_from_mm2
 +PREHOOK: Input: default@part_from_mm2@key_mm=455
 +PREHOOK: Input: default@part_from_mm2@key_mm=456
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_from_mm2 s1 order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_from_mm2
 +POSTHOOK: Input: default@part_from_mm2@key_mm=455
 +POSTHOOK: Input: default@part_from_mm2@key_mm=456
 +#### A masked pattern was here ####
++0     455
++0     456
++98    455
++98    456
++100   455
++100   456
 +PREHOOK: query: alter table part_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false')
 +PREHOOK: type: ALTERTABLE_PROPERTIES
 +PREHOOK: Input: default@part_from_mm2
 +PREHOOK: Output: default@part_from_mm2
 +POSTHOOK: query: alter table part_from_mm2 set tblproperties("transactional"="false", 'transactional_properties'='false')
 +POSTHOOK: type: ALTERTABLE_PROPERTIES
 +POSTHOOK: Input: default@part_from_mm2
 +POSTHOOK: Output: default@part_from_mm2
 +PREHOOK: query: select * from part_from_mm2 s2 order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_from_mm2
 +PREHOOK: Input: default@part_from_mm2@key_mm=455
 +PREHOOK: Input: default@part_from_mm2@key_mm=456
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_from_mm2 s2 order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_from_mm2
 +POSTHOOK: Input: default@part_from_mm2@key_mm=455
 +POSTHOOK: Input: default@part_from_mm2@key_mm=456
 +#### A masked pattern was here ####
++0     455
++0     456
++98    455
++98    456
++100   455
++100   456
 +PREHOOK: query: insert into table part_from_mm2 partition(key_mm='457') select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_from_mm2@key_mm=457
 +POSTHOOK: query: insert into table part_from_mm2 partition(key_mm='457') select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_from_mm2@key_mm=457
 +POSTHOOK: Lineage: part_from_mm2 PARTITION(key_mm=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from part_from_mm2 s3 order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_from_mm2
 +PREHOOK: Input: default@part_from_mm2@key_mm=455
 +PREHOOK: Input: default@part_from_mm2@key_mm=456
 +PREHOOK: Input: default@part_from_mm2@key_mm=457
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_from_mm2 s3 order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_from_mm2
 +POSTHOOK: Input: default@part_from_mm2@key_mm=455
 +POSTHOOK: Input: default@part_from_mm2@key_mm=456
 +POSTHOOK: Input: default@part_from_mm2@key_mm=457
 +#### A masked pattern was here ####
++0     455
++0     456
++0     457
++98    455
++98    456
++98    457
++100   455
++100   456
++100   457
 +PREHOOK: query: drop table part_from_mm2
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@part_from_mm2
 +PREHOOK: Output: default@part_from_mm2
 +POSTHOOK: query: drop table part_from_mm2
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@part_from_mm2
 +POSTHOOK: Output: default@part_from_mm2
 +PREHOOK: query: drop table part_to_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table part_to_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table part_to_mm(key int) partitioned by (key_mm int) stored as orc
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@part_to_mm
 +POSTHOOK: query: create table part_to_mm(key int) partitioned by (key_mm int) stored as orc
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@part_to_mm
 +PREHOOK: query: insert into table part_to_mm partition(key_mm='455') select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_to_mm@key_mm=455
 +POSTHOOK: query: insert into table part_to_mm partition(key_mm='455') select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_to_mm@key_mm=455
 +POSTHOOK: Lineage: part_to_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table part_to_mm partition(key_mm='456') select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_to_mm@key_mm=456
 +POSTHOOK: query: insert into table part_to_mm partition(key_mm='456') select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_to_mm@key_mm=456
 +POSTHOOK: Lineage: part_to_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from part_to_mm s1 order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_to_mm
 +PREHOOK: Input: default@part_to_mm@key_mm=455
 +PREHOOK: Input: default@part_to_mm@key_mm=456
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_to_mm s1 order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_to_mm
 +POSTHOOK: Input: default@part_to_mm@key_mm=455
 +POSTHOOK: Input: default@part_to_mm@key_mm=456
 +#### A masked pattern was here ####
++0     455
++0     456
++98    455
++98    456
++100   455
++100   456
 +PREHOOK: query: alter table part_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: ALTERTABLE_PROPERTIES
 +PREHOOK: Input: default@part_to_mm
 +PREHOOK: Output: default@part_to_mm
 +FAILED: Error in acquiring locks: Transaction already opened. txnid:63
 +POSTHOOK: query: alter table part_to_mm set tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: ALTERTABLE_PROPERTIES
 +POSTHOOK: Input: default@part_to_mm
 +POSTHOOK: Output: default@part_to_mm
 +PREHOOK: query: select * from part_to_mm s2 order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_to_mm
 +PREHOOK: Input: default@part_to_mm@key_mm=455
 +PREHOOK: Input: default@part_to_mm@key_mm=456
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_to_mm s2 order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_to_mm
 +POSTHOOK: Input: default@part_to_mm@key_mm=455
 +POSTHOOK: Input: default@part_to_mm@key_mm=456
 +#### A masked pattern was here ####
++0     455
++0     456
++98    455
++98    456
++100   455
++100   456
 +PREHOOK: query: insert into table part_to_mm partition(key_mm='456') select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_to_mm@key_mm=456
 +POSTHOOK: query: insert into table part_to_mm partition(key_mm='456') select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_to_mm@key_mm=456
 +POSTHOOK: Lineage: part_to_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table part_to_mm partition(key_mm='457') select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_to_mm@key_mm=457
 +POSTHOOK: query: insert into table part_to_mm partition(key_mm='457') select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_to_mm@key_mm=457
 +POSTHOOK: Lineage: part_to_mm PARTITION(key_mm=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from part_to_mm s3 order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_to_mm
 +PREHOOK: Input: default@part_to_mm@key_mm=455
 +PREHOOK: Input: default@part_to_mm@key_mm=456
 +PREHOOK: Input: default@part_to_mm@key_mm=457
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_to_mm s3 order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_to_mm
 +POSTHOOK: Input: default@part_to_mm@key_mm=455
 +POSTHOOK: Input: default@part_to_mm@key_mm=456
 +POSTHOOK: Input: default@part_to_mm@key_mm=457
 +#### A masked pattern was here ####
++0     455
++0     456
++0     456
++0     457
++98    455
++98    456
++98    456
++98    457
++100   455
++100   456
++100   456
++100   457
 +PREHOOK: query: drop table part_to_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@part_to_mm
 +PREHOOK: Output: default@part_to_mm
 +POSTHOOK: query: drop table part_to_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@part_to_mm
 +POSTHOOK: Output: default@part_to_mm
 +PREHOOK: query: drop table intermediate
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Output: default@intermediate
 +POSTHOOK: query: drop table intermediate
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Output: default@intermediate
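
For reference, a condensed HiveQL sketch of the conversion flow the golden
results above exercise. Table names and tblproperties are taken from the
output; the seed rows (0, 98, 100) and the single-partition setup are
assumptions, and the full .q script behind this file covers more variants:

    -- seed table; the real test populates several partitions (p=455/456/457)
    create table intermediate(key int) partitioned by (p int) stored as orc;
    insert into table intermediate partition(p=455) values (0), (98), (100);

    -- plain ORC table, converted in place to insert-only ("micro-managed") ACID
    create table simple_to_mm(key int) stored as orc;
    insert into table simple_to_mm select key from intermediate;
    alter table simple_to_mm set tblproperties(
      "transactional"="true", "transactional_properties"="insert_only");
    select * from simple_to_mm order by key;  -- pre-conversion rows remain readable
    insert into table simple_to_mm select key from intermediate;  -- new writes go to delta dirs
    drop table simple_to_mm;

The partitioned variants (part_from_mm1/2, part_to_mm) follow the same
populate, flip-tblproperties, re-read pattern in both directions.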

http://git-wip-us.apache.org/repos/asf/hive/blob/52e0f8f3/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/tez/explainuser_3.q.out
index cb867fd,65c9114..f68d2bb
--- a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
@@@ -509,16 -509,11 +509,11 @@@ Stage-
                    Conditional Operator
                      Stage-1
                        Map 1 vectorized
 -                      File Output Operator [FS_10]
 +                      File Output Operator [FS_8]
                          table:{"name:":"default.orc_merge5"}
 -                        Select Operator [SEL_9] (rows=306 width=268)
 +                        Select Operator [SEL_7] (rows=306 width=268)
                            Output:["_col0","_col1","_col2","_col3","_col4"]
 -                          Filter Operator [FIL_8] (rows=306 width=268)
 +                          Filter Operator [FIL_6] (rows=306 width=268)
-                       File Output Operator [FS_3]
-                         table:{"name:":"default.orc_merge5"}
-                         Select Operator [SEL_2] (rows=306 width=268)
-                           Output:["_col0","_col1","_col2","_col3","_col4"]
-                           Filter Operator [FIL_4] (rows=306 width=268)
                              predicate:(userid <= 13)
                              TableScan [TS_0] (rows=919 width=268)
                                default@orc_merge5,orc_merge5,Tbl:COMPLETE,Col:NONE,Output:["userid","string1","subtype","decimal1","ts"]
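
The hunk above tracks operator renumbering after vectorization (FS_10/SEL_9/FIL_8
become FS_8/SEL_7/FIL_6) and drops a duplicated operator block. A hedged
reconstruction of the kind of statement behind this plan fragment, with the
target table, column list, and predicate read off the operators shown (the
actual query text in explainuser_3.q may differ):

    explain insert overwrite table orc_merge5
      select userid, string1, subtype, decimal1, ts
      from orc_merge5
      where userid <= 13;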
