hive git commit: HIVE-19530: Vectorization: Fix JDBCSerde and re-enable vectorization (Matt McCline, reviewed by Teddy Choi)

2018-06-18 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/branch-3 7c32fce80 -> 72df94747


HIVE-19530: Vectorization: Fix JDBCSerde and re-enable vectorization (Matt McCline, reviewed by Teddy Choi)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/72df9474
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/72df9474
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/72df9474

Branch: refs/heads/branch-3
Commit: 72df947478ad2021b0d6e255993e1074ab94ab3a
Parents: 7c32fce
Author: Matt McCline 
Authored: Tue Jun 19 00:13:48 2018 -0500
Committer: Matt McCline 
Committed: Tue Jun 19 00:15:15 2018 -0500

--
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java  | 2 +-
 ql/src/test/results/clientpositive/llap/jdbc_handler.q.out | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--
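
For anyone verifying the new default, here is a minimal sketch of reading the exclude list through HiveConf. Nothing below is part of the patch; only the enum constant name is taken from the diff, the rest is an illustrative standalone program:

  import org.apache.hadoop.hive.conf.HiveConf;

  public class CheckRowSerdeExcludes {
    public static void main(String[] args) {
      // Build a default configuration; with this fix the value should list only the
      // two Parquet input formats and no longer JdbcInputFormat.
      HiveConf conf = new HiveConf();
      String excludes = conf.getVar(
          HiveConf.ConfVars.HIVE_VECTORIZATION_ROW_DESERIALIZE_INPUTFORMAT_EXCLUDES);
      System.out.println("hive.vectorized.row.serde.inputformat.excludes = " + excludes);
    }
  }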


http://git-wip-us.apache.org/repos/asf/hive/blob/72df9474/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index fc64f86..d6301ec 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3552,7 +3552,7 @@ public class HiveConf extends Configuration {
 "The default value is false."),
 HIVE_VECTORIZATION_ROW_DESERIALIZE_INPUTFORMAT_EXCLUDES(
 "hive.vectorized.row.serde.inputformat.excludes",
-"org.apache.parquet.hadoop.ParquetInputFormat,org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat,org.apache.hive.storage.jdbc.JdbcInputFormat",
+"org.apache.parquet.hadoop.ParquetInputFormat,org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
 "The input formats not supported by row deserialize vectorization."),
 HIVE_VECTOR_ADAPTOR_USAGE_MODE("hive.vectorized.adaptor.usage.mode", "all", new StringSet("none", "chosen", "all"),
 "Specifies the extent to which the VectorUDFAdaptor will be used for UDFs that do not have a corresponding vectorized class.\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/72df9474/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out b/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
index 90d88df..94fadf1 100644
--- a/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
+++ b/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
@@ -253,7 +253,7 @@ STAGE PLANS:
   sort order: 
   Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
   value expressions: _col0 (type: string)
-Execution mode: llap
+Execution mode: vectorized, llap
 LLAP IO: no inputs
 Map 4 
 Map Operator Tree:



hive git commit: HIVE-19530: Vectorization: Fix JDBCSerde and re-enable vectorization (Matt McCline, reviewed by Teddy Choi)

2018-06-18 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/master cf2e18534 -> d5cb88d52


HIVE-19530: Vectorization: Fix JDBCSerde and re-enable vectorization (Matt McCline, reviewed by Teddy Choi)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d5cb88d5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d5cb88d5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d5cb88d5

Branch: refs/heads/master
Commit: d5cb88d52ef1cc8ca315bbf703fa090ef197ba33
Parents: cf2e185
Author: Matt McCline 
Authored: Tue Jun 19 00:13:48 2018 -0500
Committer: Matt McCline 
Committed: Tue Jun 19 00:13:48 2018 -0500

--
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java  | 2 +-
 ql/src/test/results/clientpositive/llap/jdbc_handler.q.out | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d5cb88d5/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 933bda4..e7f5fc0 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3563,7 +3563,7 @@ public class HiveConf extends Configuration {
 "The default value is false."),
 HIVE_VECTORIZATION_ROW_DESERIALIZE_INPUTFORMAT_EXCLUDES(
 "hive.vectorized.row.serde.inputformat.excludes",
-"org.apache.parquet.hadoop.ParquetInputFormat,org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat,org.apache.hive.storage.jdbc.JdbcInputFormat",
+"org.apache.parquet.hadoop.ParquetInputFormat,org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
 "The input formats not supported by row deserialize vectorization."),
 HIVE_VECTOR_ADAPTOR_USAGE_MODE("hive.vectorized.adaptor.usage.mode", "all", new StringSet("none", "chosen", "all"),
 "Specifies the extent to which the VectorUDFAdaptor will be used for UDFs that do not have a corresponding vectorized class.\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/d5cb88d5/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out b/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
index ffeafdb..e7e3bc3 100644
--- a/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
+++ b/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
@@ -237,7 +237,7 @@ STAGE PLANS:
   sort order: 
   Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
   value expressions: _col0 (type: string)
-Execution mode: llap
+Execution mode: vectorized, llap
 LLAP IO: no inputs
 Map 4 
 Map Operator Tree:



[1/2] hive git commit: HIVE-19777 : NPE in TezSessionState (Sergey Shelukhin, reviewed by Jason Dere)

2018-06-18 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/branch-3 6c329a297 -> 7c32fce80
  refs/heads/master 1a610cc54 -> cf2e18534


HIVE-19777 : NPE in TezSessionState (Sergey Shelukhin, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cf2e1853
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cf2e1853
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cf2e1853

Branch: refs/heads/master
Commit: cf2e185341cf517ae7c5d36047d345d086f4ecb5
Parents: 1a610cc
Author: sergey 
Authored: Mon Jun 18 15:15:38 2018 -0700
Committer: sergey 
Committed: Mon Jun 18 15:15:38 2018 -0700

--
 .../hive/ql/exec/tez/TezSessionState.java   | 33 +---
 1 file changed, 28 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/cf2e1853/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
index fe139c9..08e65a4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
@@ -180,7 +180,11 @@ public class TezSessionState {
   return false;
 }
 try {
-  session = sessionFuture.get(0, TimeUnit.NANOSECONDS);
+  TezClient session = sessionFuture.get(0, TimeUnit.NANOSECONDS);
+  if (session == null) {
+return false;
+  }
+  this.session = session;
 } catch (InterruptedException e) {
   Thread.currentThread().interrupt();
   return false;
@@ -202,7 +206,11 @@ public class TezSessionState {
   return false;
 }
 try {
-  session = sessionFuture.get(0, TimeUnit.NANOSECONDS);
+  TezClient session = sessionFuture.get(0, TimeUnit.NANOSECONDS);
+  if (session == null) {
+return false;
+  }
+  this.session = session;
 } catch (InterruptedException e) {
   Thread.currentThread().interrupt();
   return false;
@@ -363,12 +371,23 @@ public class TezSessionState {
  FutureTask<TezClient> sessionFuture = new FutureTask<>(new Callable<TezClient>() {
 @Override
 public TezClient call() throws Exception {
+  TezClient result = null;
   try {
-return startSessionAndContainers(session, conf, commonLocalResources, tezConfig, true);
+result = startSessionAndContainers(
+session, conf, commonLocalResources, tezConfig, true);
   } catch (Throwable t) {
+// The caller has already stopped the session.
 LOG.error("Failed to start Tez session", t);
 throw (t instanceof Exception) ? (Exception)t : new Exception(t);
   }
+  // Check interrupt at the last moment in case we get cancelled quickly.
+  // This is not bulletproof but should allow us to close session in most cases.
+  if (Thread.interrupted()) {
+LOG.info("Interrupted while starting Tez session");
+closeAndIgnoreExceptions(result);
+return null;
+  }
+  return result;
 }
   });
   new Thread(sessionFuture, "Tez session start thread").start();
@@ -471,7 +490,11 @@ public class TezSessionState {
   return;
 }
 try {
-  this.session = this.sessionFuture.get();
+  TezClient session = this.sessionFuture.get();
+  if (session == null) {
+throw new RuntimeException("Initialization was interrupted");
+  }
+  this.session = session;
 } catch (ExecutionException e) {
   throw new RuntimeException(e);
 }
@@ -645,7 +668,7 @@ public class TezSessionState {
 appJarLr = null;
 
 try {
-  if (getSession() != null) {
+  if (session != null) {
 LOG.info("Closing Tez Session");
 closeClient(session);
 session = null;
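
The crux of this patch: the session-start Callable may now return null (when the start thread observes an interrupt), so every reader of sessionFuture has to treat a null TezClient as "not started" instead of blindly assigning it. A self-contained sketch of that pattern using plain java.util.concurrent, with a String standing in for TezClient (purely illustrative, not Hive code):

  import java.util.concurrent.Callable;
  import java.util.concurrent.ExecutionException;
  import java.util.concurrent.FutureTask;
  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.TimeoutException;

  public class NullableFutureSketch {
    public static void main(String[] args) throws Exception {
      FutureTask<String> sessionFuture = new FutureTask<>(new Callable<String>() {
        @Override
        public String call() {
          // Simulates "interrupted while starting": hand back null instead of a session.
          return Thread.interrupted() ? null : "session";
        }
      });
      new Thread(sessionFuture, "start thread").start();

      String session;
      try {
        String result = sessionFuture.get(1, TimeUnit.SECONDS);
        if (result == null) {
          System.out.println("start was interrupted; nothing to use");
          return;
        }
        session = result; // only assign once we know it is non-null
      } catch (InterruptedException | ExecutionException | TimeoutException e) {
        System.out.println("session not available: " + e);
        return;
      }
      System.out.println("got " + session);
    }
  }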



[2/2] hive git commit: HIVE-19777 : NPE in TezSessionState (Sergey Shelukhin, reviewed by Jason Dere)

2018-06-18 Thread sershe
HIVE-19777 : NPE in TezSessionState (Sergey Shelukhin, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7c32fce8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7c32fce8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7c32fce8

Branch: refs/heads/branch-3
Commit: 7c32fce80694e19bef6395947e4db1d5cb8171e0
Parents: 6c329a2
Author: sergey 
Authored: Mon Jun 18 15:15:38 2018 -0700
Committer: sergey 
Committed: Mon Jun 18 15:16:04 2018 -0700

--
 .../hive/ql/exec/tez/TezSessionState.java   | 33 +---
 1 file changed, 28 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/7c32fce8/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
index fe139c9..08e65a4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
@@ -180,7 +180,11 @@ public class TezSessionState {
   return false;
 }
 try {
-  session = sessionFuture.get(0, TimeUnit.NANOSECONDS);
+  TezClient session = sessionFuture.get(0, TimeUnit.NANOSECONDS);
+  if (session == null) {
+return false;
+  }
+  this.session = session;
 } catch (InterruptedException e) {
   Thread.currentThread().interrupt();
   return false;
@@ -202,7 +206,11 @@ public class TezSessionState {
   return false;
 }
 try {
-  session = sessionFuture.get(0, TimeUnit.NANOSECONDS);
+  TezClient session = sessionFuture.get(0, TimeUnit.NANOSECONDS);
+  if (session == null) {
+return false;
+  }
+  this.session = session;
 } catch (InterruptedException e) {
   Thread.currentThread().interrupt();
   return false;
@@ -363,12 +371,23 @@ public class TezSessionState {
  FutureTask<TezClient> sessionFuture = new FutureTask<>(new Callable<TezClient>() {
 @Override
 public TezClient call() throws Exception {
+  TezClient result = null;
   try {
-return startSessionAndContainers(session, conf, commonLocalResources, tezConfig, true);
+result = startSessionAndContainers(
+session, conf, commonLocalResources, tezConfig, true);
   } catch (Throwable t) {
+// The caller has already stopped the session.
 LOG.error("Failed to start Tez session", t);
 throw (t instanceof Exception) ? (Exception)t : new Exception(t);
   }
+  // Check interrupt at the last moment in case we get cancelled quickly.
+  // This is not bulletproof but should allow us to close session in most cases.
+  if (Thread.interrupted()) {
+LOG.info("Interrupted while starting Tez session");
+closeAndIgnoreExceptions(result);
+return null;
+  }
+  return result;
 }
   });
   new Thread(sessionFuture, "Tez session start thread").start();
@@ -471,7 +490,11 @@ public class TezSessionState {
   return;
 }
 try {
-  this.session = this.sessionFuture.get();
+  TezClient session = this.sessionFuture.get();
+  if (session == null) {
+throw new RuntimeException("Initialization was interrupted");
+  }
+  this.session = session;
 } catch (ExecutionException e) {
   throw new RuntimeException(e);
 }
@@ -645,7 +668,7 @@ public class TezSessionState {
 appJarLr = null;
 
 try {
-  if (getSession() != null) {
+  if (session != null) {
 LOG.info("Closing Tez Session");
 closeClient(session);
 session = null;



[45/67] [abbrv] hive git commit: HIVE-19923: Follow up of HIVE-19615, use UnaryFunction instead of prefix (Slim Bouguerra, reviewed by Nishant Bangarwa, Ashutosh Chauhan)

2018-06-18 Thread sershe
HIVE-19923: Follow up of HIVE-19615, use UnaryFunction instead of prefix (Slim Bouguerra, reviewed by Nishant Bangarwa, Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3a6ad266
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3a6ad266
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3a6ad266

Branch: refs/heads/master-txnstats
Commit: 3a6ad2661e5fdd3e6ce8b8f7ee5a35ddb3bd2c47
Parents: 6a16a71
Author: Slim Bouguerra 
Authored: Mon Jun 18 07:54:44 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Mon Jun 18 07:54:44 2018 -0700

--
 .../ql/parse/DruidSqlOperatorConverter.java | 35 ++--
 .../clientpositive/druid/druidmini_test1.q.out  |  2 +-
 2 files changed, 34 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3a6ad266/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
index 4db0714..6aa98c0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.parse;
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Maps;
 import org.apache.calcite.adapter.druid.DirectOperatorConversion;
 import org.apache.calcite.adapter.druid.DruidExpressions;
@@ -51,6 +52,7 @@ import org.joda.time.Period;
 
 import javax.annotation.Nullable;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.TimeZone;
 
@@ -87,9 +89,9 @@ public class DruidSqlOperatorConverter {
   druidOperatorMap
  .put(SqlStdOperatorTable.SUBSTRING, new DruidSqlOperatorConverter.DruidSubstringOperatorConversion());
   druidOperatorMap
-  .put(SqlStdOperatorTable.IS_NULL, new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NULL, "isnull"));
+  .put(SqlStdOperatorTable.IS_NULL, new UnaryFunctionOperatorConversion(SqlStdOperatorTable.IS_NULL, "isnull"));
   druidOperatorMap.put(SqlStdOperatorTable.IS_NOT_NULL,
-  new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NOT_NULL, "notnull")
+  new UnaryFunctionOperatorConversion(SqlStdOperatorTable.IS_NOT_NULL, "notnull")
   );
   druidOperatorMap.put(HiveTruncSqlOperator.INSTANCE, new DruidDateTruncOperatorConversion());
   druidOperatorMap.put(HiveToDateSqlOperator.INSTANCE, new DruidToDateOperatorConversion());
@@ -346,4 +348,33 @@ public class DruidSqlOperatorConverter {
 );
   }
 
+  public static class UnaryFunctionOperatorConversion implements org.apache.calcite.adapter.druid.DruidSqlOperatorConverter {
+
+private final SqlOperator operator;
+private final String druidOperator;
+
+public UnaryFunctionOperatorConversion(SqlOperator operator, String druidOperator) {
+  this.operator = operator;
+  this.druidOperator = druidOperator;
+}
+
+@Override public SqlOperator calciteOperator() {
+  return operator;
+}
+
+@Override public String toDruidExpression(RexNode rexNode, RelDataType rowType,
+DruidQuery druidQuery) {
+  final RexCall call = (RexCall) rexNode;
+
+  final List<String> druidExpressions = DruidExpressions.toDruidExpressions(
+  druidQuery, rowType,
+  call.getOperands());
+
+  if (druidExpressions == null) {
+return null;
+  }
+
+  return DruidQuery.format("%s(%s)", druidOperator, Iterables.getOnlyElement(druidExpressions));
+}
+  }
 }
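
The practical effect of swapping UnarySuffixOperatorConversion for UnaryFunctionOperatorConversion is the shape of the Druid expression string Hive emits: function syntax such as isnull("cstring1") rather than a suffix form. A small standalone sketch of the formatting difference (plain Java string formatting, not Calcite/Druid code; only the function-style output mirrors the DruidQuery.format("%s(%s)", ...) call above, while the suffix-style line is just an illustrative stand-in for the old conversion):

  public class DruidUnaryFormatSketch {
    public static void main(String[] args) {
      String operand = "\"cstring1\"";
      // Old conversion appended the operator after the operand (suffix style, indicative only).
      String oldSuffixStyle = String.format("(%s %s)", operand, "isnull");
      // New conversion emits a Druid function call, matching DruidQuery.format("%s(%s)", ...).
      String newFunctionStyle = String.format("%s(%s)", "isnull", operand);
      System.out.println(oldSuffixStyle + "  ->  " + newFunctionStyle);
    }
  }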

http://git-wip-us.apache.org/repos/asf/hive/blob/3a6ad266/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
--
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out b/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
index 89da36a..4e078aa 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
@@ -814,7 +814,7 @@ STAGE PLANS:
   properties:
 druid.fieldNames vc,vc0
 druid.fieldTypes boolean,boolean
-druid.query.json 
{"queryType":"scan","dataSource":"default.druid_table_n3","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"(\"cstring1\"
 

[63/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
--
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index 1d57aee..d3449a7 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -105,7 +105,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService
   virtual void get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names) = 0;
   virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0;
   virtual void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) = 0;
-  virtual void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) = 0;
+  virtual void alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) = 0;
   virtual void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) = 0;
   virtual void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) = 0;
   virtual bool partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) = 0;
@@ -516,7 +516,7 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
   void alter_partitions(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* new_parts */) {
 return;
   }
-  void alter_partitions_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* new_parts */, const EnvironmentContext& /* environment_context */) {
+  void alter_partitions_with_environment_context(AlterPartitionsResponse& /* _return */, const AlterPartitionsRequest& /* req */) {
 return;
   }
   void alter_partition_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */, const EnvironmentContext& /* environment_context */) {
@@ -11637,11 +11637,8 @@ class ThriftHiveMetastore_alter_partitions_presult {
 };
 
 typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset {
-  _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset() : db_name(false), tbl_name(false), new_parts(false), environment_context(false) {}
-  bool db_name :1;
-  bool tbl_name :1;
-  bool new_parts :1;
-  bool environment_context :1;
+  _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset() : req(false) {}
+  bool req :1;
 } _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset;
 
 class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
@@ -11649,34 +11646,19 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
 
   ThriftHiveMetastore_alter_partitions_with_environment_context_args(const ThriftHiveMetastore_alter_partitions_with_environment_context_args&);
   ThriftHiveMetastore_alter_partitions_with_environment_context_args& operator=(const ThriftHiveMetastore_alter_partitions_with_environment_context_args&);
-  ThriftHiveMetastore_alter_partitions_with_environment_context_args() : db_name(), tbl_name() {
+  ThriftHiveMetastore_alter_partitions_with_environment_context_args() {
   }
 
   virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_args() throw();
-  std::string db_name;
-  std::string tbl_name;
-  std::vector  new_parts;
-  EnvironmentContext environment_context;
+  AlterPartitionsRequest req;
 
   _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset __isset;
 
-  void __set_db_name(const std::string& val);
-
-  void __set_tbl_name(const std::string& val);
-
-  void __set_new_parts(const std::vector & val);
-
-  void __set_environment_context(const EnvironmentContext& val);
+  void __set_req(const AlterPartitionsRequest& val);
 
   bool operator == (const ThriftHiveMetastore_alter_partitions_with_environment_context_args & rhs) const
   {
-if (!(db_name == rhs.db_name))
-  return false;
-if (!(tbl_name == rhs.tbl_name))
-  return false;
-if (!(new_parts == rhs.new_parts))
-  return false;
-if (!(environment_context == rhs.environment_context))
+if (!(req == rhs.req))
   return false;
 return true;
   }
@@ -11697,17 +11679,15 

[65/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/test/results/clientpositive/stats_part2.q.out
--
diff --git a/ql/src/test/results/clientpositive/stats_part2.q.out b/ql/src/test/results/clientpositive/stats_part2.q.out
new file mode 100644
index 000..94e186d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_part2.q.out
@@ -0,0 +1,1261 @@
+PREHOOK: query: drop table if exists mysource
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists mysource
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table mysource (p int, key int, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mysource
+POSTHOOK: query: create table mysource (p int, key int, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mysource
+PREHOOK: query: insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: drop table if exists stats_partitioned
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists stats_partitioned
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_part
+POSTHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_part
+PREHOOK: query: explain select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: stats_part
+Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+Filter Operator
+  predicate: (p > 100) (type: boolean)
+  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+  Select Operator
+Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+Group By Operator
+  aggregations: count()
+  mode: hash
+  outputColumnNames: _col0
+  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+  Reduce Output Operator
+sort order: 
+Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+value expressions: _col0 (type: bigint)
+  Execution mode: vectorized
+  Reduce Operator Tree:
+Group By Operator
+  aggregations: count(VALUE._col0)
+  mode: mergepartial
+  outputColumnNames: _col0
+  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+  File Output Operator
+compressed: false
+Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+table:
+input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+Fetch Operator
+  limit: -1
+  Processor Tree:
+ListSink
+
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE 

[62/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
--
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index bc4d168..c2d6a56 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -215,6 +215,18 @@ const char* _kSchemaVersionStateNames[] = {
 };
 const std::map _SchemaVersionState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(8, _kSchemaVersionStateValues, _kSchemaVersionStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
 
+int _kIsolationLevelComplianceValues[] = {
+  IsolationLevelCompliance::YES,
+  IsolationLevelCompliance::NO,
+  IsolationLevelCompliance::UNKNOWN
+};
+const char* _kIsolationLevelComplianceNames[] = {
+  "YES",
+  "NO",
+  "UNKNOWN"
+};
+const std::map _IsolationLevelCompliance_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kIsolationLevelComplianceValues, _kIsolationLevelComplianceNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
 int _kFunctionTypeValues[] = {
   FunctionType::JAVA
 };
@@ -6435,6 +6447,21 @@ void Table::__set_ownerType(const PrincipalType::type val) {
 __isset.ownerType = true;
 }
 
+void Table::__set_txnId(const int64_t val) {
+  this->txnId = val;
+__isset.txnId = true;
+}
+
+void Table::__set_validWriteIdList(const std::string& val) {
+  this->validWriteIdList = val;
+__isset.validWriteIdList = true;
+}
+
+void Table::__set_isStatsCompliant(const IsolationLevelCompliance::type val) {
+  this->isStatsCompliant = val;
+__isset.isStatsCompliant = true;
+}
+
 uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -6629,6 +6656,32 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
   xfer += iprot->skip(ftype);
 }
 break;
+  case 19:
+if (ftype == ::apache::thrift::protocol::T_I64) {
+  xfer += iprot->readI64(this->txnId);
+  this->__isset.txnId = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  case 20:
+if (ftype == ::apache::thrift::protocol::T_STRING) {
+  xfer += iprot->readString(this->validWriteIdList);
+  this->__isset.validWriteIdList = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  case 21:
+if (ftype == ::apache::thrift::protocol::T_I32) {
+  int32_t ecast249;
+  xfer += iprot->readI32(ecast249);
+  this->isStatsCompliant = (IsolationLevelCompliance::type)ecast249;
+  this->__isset.isStatsCompliant = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
   default:
 xfer += iprot->skip(ftype);
 break;
@@ -6677,10 +6730,10 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("partitionKeys", 
::apache::thrift::protocol::T_LIST, 8);
   {
 xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, 
static_cast(this->partitionKeys.size()));
-std::vector ::const_iterator _iter249;
-for (_iter249 = this->partitionKeys.begin(); _iter249 != 
this->partitionKeys.end(); ++_iter249)
+std::vector ::const_iterator _iter250;
+for (_iter250 = this->partitionKeys.begin(); _iter250 != 
this->partitionKeys.end(); ++_iter250)
 {
-  xfer += (*_iter249).write(oprot);
+  xfer += (*_iter250).write(oprot);
 }
 xfer += oprot->writeListEnd();
   }
@@ -6689,11 +6742,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("parameters", 
::apache::thrift::protocol::T_MAP, 9);
   {
 xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, 
::apache::thrift::protocol::T_STRING, 
static_cast(this->parameters.size()));
-std::map ::const_iterator _iter250;
-for (_iter250 = this->parameters.begin(); _iter250 != 
this->parameters.end(); ++_iter250)
+std::map ::const_iterator _iter251;
+for (_iter251 = this->parameters.begin(); _iter251 != 
this->parameters.end(); ++_iter251)
 {
-  xfer += oprot->writeString(_iter250->first);
-  xfer += oprot->writeString(_iter250->second);
+  xfer += oprot->writeString(_iter251->first);
+  xfer += oprot->writeString(_iter251->second);
 }
 xfer += oprot->writeMapEnd();
   }
@@ -6741,6 +6794,21 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
 xfer += oprot->writeI32((int32_t)this->ownerType);
 xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.txnId) {
+xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 

[41/67] [abbrv] hive git commit: HIVE-19898: Disable TransactionalValidationListener when the table is not in the Hive catalog (Jason Dere, reviewed by Eugene Koifman)

2018-06-18 Thread sershe
HIVE-19898: Disable TransactionalValidationListener when the table is not in the Hive catalog (Jason Dere, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ebd2c5f8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ebd2c5f8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ebd2c5f8

Branch: refs/heads/master-txnstats
Commit: ebd2c5f8a82b35eabca146520ffcd87605084618
Parents: 766c3dc
Author: Jason Dere 
Authored: Sun Jun 17 21:53:23 2018 -0700
Committer: Jason Dere 
Committed: Sun Jun 17 21:53:23 2018 -0700

--
 .../TestTransactionalValidationListener.java| 127 +++
 .../TransactionalValidationListener.java|  23 +++-
 .../metastore/client/MetaStoreClientTest.java   |   2 +-
 3 files changed, 146 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ebd2c5f8/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTransactionalValidationListener.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTransactionalValidationListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTransactionalValidationListener.java
new file mode 100644
index 000..3aaad22
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTransactionalValidationListener.java
@@ -0,0 +1,127 @@
+package org.apache.hadoop.hive.metastore;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+import org.apache.hadoop.hive.metastore.api.Catalog;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.MetaStoreClientTest;
+import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+@RunWith(Parameterized.class)
+public class TestTransactionalValidationListener extends MetaStoreClientTest {
+
+  private AbstractMetaStoreService metaStore;
+  private IMetaStoreClient client;
+  private boolean createdCatalogs = false;
+
+  @BeforeClass
+  public static void startMetaStores() {
+Map msConf = new HashMap();
+
+// Enable TransactionalValidationListener + create.as.acid
+Map extraConf = new HashMap<>();
+extraConf.put("metastore.create.as.acid", "true");
+extraConf.put("hive.txn.manager", 
"org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
+extraConf.put("hive.support.concurrency", "true");
+startMetaStores(msConf, extraConf);
+  }
+
+  @Before
+  public void setUp() throws Exception {
+// Get new client
+client = metaStore.getClient();
+if (!createdCatalogs) {
+  createCatalogs();
+  createdCatalogs = true;
+}
+  }
+
+  @After
+  public void tearDown() throws Exception {
+try {
+  if (client != null) {
+client.close();
+  }
+} finally {
+  client = null;
+}
+  }
+
+  public TestTransactionalValidationListener(String name, AbstractMetaStoreService metaStore) throws Exception {
+this.metaStore = metaStore;
+  }
+
+  private void createCatalogs() throws Exception {
+String[] catNames = {"spark", "myapp"};
+String[] location = {MetaStoreTestUtils.getTestWarehouseDir("spark"),
+ MetaStoreTestUtils.getTestWarehouseDir("myapp")};
+
+for (int i = 0; i < catNames.length; i++) {
+  Catalog cat = new CatalogBuilder()
+  .setName(catNames[i])
+  .setLocation(location[i])
+  .build();
+  client.createCatalog(cat);
+  File dir = new File(cat.getLocationUri());
+  Assert.assertTrue(dir.exists() && dir.isDirectory());
+}
+  }
+
+  private Table createOrcTable(String catalog) throws Exception {
+Table table = new Table();
+StorageDescriptor sd = new StorageDescriptor();
+List cols = new ArrayList<>();
+
+
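
The title of this change captures the behavior being tested above: transactional (ACID) validation should apply only to tables in the default Hive catalog, so tables created through other catalogs (the test registers "spark" and "myapp") are left untouched. A hedged sketch of that guard, not the actual listener code, just an illustration of the idea using metastore API classes:

  import org.apache.hadoop.hive.metastore.Warehouse;
  import org.apache.hadoop.hive.metastore.api.Table;

  public class CatalogGuardSketch {
    // Returns true only for tables that belong to the default "hive" catalog;
    // the real listener's placement and method names differ.
    static boolean shouldApplyTransactionalValidation(Table table) {
      String cat = table.getCatName();
      return cat == null || Warehouse.DEFAULT_CATALOG_NAME.equalsIgnoreCase(cat);
    }
  }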

[58/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
--
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
index 38d4f64..d9f17cc 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
@@ -56,6 +56,9 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField 
CREATION_METADATA_FIELD_DESC = new 
org.apache.thrift.protocol.TField("creationMetadata", 
org.apache.thrift.protocol.TType.STRUCT, (short)16);
   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = 
new org.apache.thrift.protocol.TField("catName", 
org.apache.thrift.protocol.TType.STRING, (short)17);
   private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC 
= new org.apache.thrift.protocol.TField("ownerType", 
org.apache.thrift.protocol.TType.I32, (short)18);
+  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)19);
+  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)20);
+  private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)21);
 
   private static final Map, SchemeFactory> schemes = 
new HashMap, SchemeFactory>();
   static {
@@ -81,6 +84,9 @@ import org.slf4j.LoggerFactory;
   private CreationMetadata creationMetadata; // optional
   private String catName; // optional
   private PrincipalType ownerType; // optional
+  private long txnId; // optional
+  private String validWriteIdList; // optional
+  private IsolationLevelCompliance isStatsCompliant; // optional
 
   /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -105,7 +111,14 @@ import org.slf4j.LoggerFactory;
  * 
  * @see PrincipalType
  */
-OWNER_TYPE((short)18, "ownerType");
+OWNER_TYPE((short)18, "ownerType"),
+TXN_ID((short)19, "txnId"),
+VALID_WRITE_ID_LIST((short)20, "validWriteIdList"),
+/**
+ * 
+ * @see IsolationLevelCompliance
+ */
+IS_STATS_COMPLIANT((short)21, "isStatsCompliant");
 
 private static final Map byName = new HashMap();
 
@@ -156,6 +169,12 @@ import org.slf4j.LoggerFactory;
   return CAT_NAME;
 case 18: // OWNER_TYPE
   return OWNER_TYPE;
+case 19: // TXN_ID
+  return TXN_ID;
+case 20: // VALID_WRITE_ID_LIST
+  return VALID_WRITE_ID_LIST;
+case 21: // IS_STATS_COMPLIANT
+  return IS_STATS_COMPLIANT;
 default:
   return null;
   }
@@ -201,8 +220,9 @@ import org.slf4j.LoggerFactory;
   private static final int __RETENTION_ISSET_ID = 2;
   private static final int __TEMPORARY_ISSET_ID = 3;
   private static final int __REWRITEENABLED_ISSET_ID = 4;
+  private static final int __TXNID_ISSET_ID = 5;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = 
{_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE};
+  private static final _Fields optionals[] = 
{_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> 
metaDataMap;
   static {
 Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -245,6 +265,12 @@ import org.slf4j.LoggerFactory;
 new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
 tmpMap.put(_Fields.OWNER_TYPE, new 
org.apache.thrift.meta_data.FieldMetaData("ownerType", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
 new 
org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, 
PrincipalType.class)));
+tmpMap.put(_Fields.TXN_ID, new 
org.apache.thrift.meta_data.FieldMetaData("txnId", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+new 
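
Together with the new _Fields entries and optionals[] above, these additions mean the regenerated Table bean now carries three optional fields for transactional statistics: txnId, validWriteIdList and isStatsCompliant. A hedged usage sketch (setter and isSet method names are assumed from standard Thrift Java codegen, and the write-id list string is purely illustrative, not a real value):

  import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;
  import org.apache.hadoop.hive.metastore.api.Table;

  public class TableTxnFieldsSketch {
    public static void main(String[] args) {
      Table t = new Table();
      // Optional fields: the corresponding __isset flags are set by the setters.
      t.setTxnId(42L);
      t.setValidWriteIdList("example-valid-write-id-list");
      t.setIsStatsCompliant(IsolationLevelCompliance.YES);
      System.out.println(t.isSetTxnId() + " " + t.isSetIsStatsCompliant());
    }
  }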

[54/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
--
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 5a3f2c1..cb5e158 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -717,13 +717,10 @@ class Iface(fb303.FacebookService.Iface):
 """
 pass
 
-  def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context):
+  def alter_partitions_with_environment_context(self, req):
 """
 Parameters:
- - db_name
- - tbl_name
- - new_parts
- - environment_context
+ - req
 """
 pass
 
@@ -4734,24 +4731,18 @@ class Client(fb303.FacebookService.Client, Iface):
   raise result.o2
 return
 
-  def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context):
+  def alter_partitions_with_environment_context(self, req):
 """
 Parameters:
- - db_name
- - tbl_name
- - new_parts
- - environment_context
+ - req
 """
-self.send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
-self.recv_alter_partitions_with_environment_context()
+self.send_alter_partitions_with_environment_context(req)
+return self.recv_alter_partitions_with_environment_context()
 
-  def send_alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context):
+  def send_alter_partitions_with_environment_context(self, req):
 self._oprot.writeMessageBegin('alter_partitions_with_environment_context', TMessageType.CALL, self._seqid)
 args = alter_partitions_with_environment_context_args()
-args.db_name = db_name
-args.tbl_name = tbl_name
-args.new_parts = new_parts
-args.environment_context = environment_context
+args.req = req
 args.write(self._oprot)
 self._oprot.writeMessageEnd()
 self._oprot.trans.flush()
@@ -4767,11 +4758,13 @@ class Client(fb303.FacebookService.Client, Iface):
 result = alter_partitions_with_environment_context_result()
 result.read(iprot)
 iprot.readMessageEnd()
+if result.success is not None:
+  return result.success
 if result.o1 is not None:
   raise result.o1
 if result.o2 is not None:
   raise result.o2
-return
+raise TApplicationException(TApplicationException.MISSING_RESULT, "alter_partitions_with_environment_context failed: unknown result")
 
   def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context):
 """
@@ -11366,7 +11359,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
 iprot.readMessageEnd()
 result = alter_partitions_with_environment_context_result()
 try:
-  self._handler.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context)
+  result.success = self._handler.alter_partitions_with_environment_context(args.req)
   msg_type = TMessageType.REPLY
 except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
   raise
@@ -15987,10 +15980,10 @@ class get_databases_result:
   if fid == 0:
 if ftype == TType.LIST:
   self.success = []
-  (_etype819, _size816) = iprot.readListBegin()
-  for _i820 in xrange(_size816):
-_elem821 = iprot.readString()
-self.success.append(_elem821)
+  (_etype826, _size823) = iprot.readListBegin()
+  for _i827 in xrange(_size823):
+_elem828 = iprot.readString()
+self.success.append(_elem828)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -16013,8 +16006,8 @@ class get_databases_result:
 if self.success is not None:
   oprot.writeFieldBegin('success', TType.LIST, 0)
   oprot.writeListBegin(TType.STRING, len(self.success))
-  for iter822 in self.success:
-oprot.writeString(iter822)
+  for iter829 in self.success:
+oprot.writeString(iter829)
   oprot.writeListEnd()
   oprot.writeFieldEnd()
 if self.o1 is not None:
@@ -16119,10 +16112,10 @@ class get_all_databases_result:
   if fid == 0:
 if ftype == TType.LIST:
   self.success = []
-  (_etype826, _size823) = iprot.readListBegin()
-  for _i827 in xrange(_size823):
-_elem828 = iprot.readString()
-self.success.append(_elem828)
+  (_etype833, _size830) = iprot.readListBegin()
+  for _i834 in xrange(_size830):
+_elem835 = iprot.readString()
+

[53/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
--
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 11affe3..ccca4e9 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -305,6 +305,23 @@ class SchemaVersionState:
 "DELETED": 8,
   }
 
+class IsolationLevelCompliance:
+  YES = 1
+  NO = 2
+  UNKNOWN = 3
+
+  _VALUES_TO_NAMES = {
+1: "YES",
+2: "NO",
+3: "UNKNOWN",
+  }
+
+  _NAMES_TO_VALUES = {
+"YES": 1,
+"NO": 2,
+"UNKNOWN": 3,
+  }
+
 class FunctionType:
   JAVA = 1
 
@@ -4550,6 +4567,9 @@ class Table:
- creationMetadata
- catName
- ownerType
+   - txnId
+   - validWriteIdList
+   - isStatsCompliant
   """
 
   thrift_spec = (
@@ -4572,9 +4592,12 @@ class Table:
 (16, TType.STRUCT, 'creationMetadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 16
 (17, TType.STRING, 'catName', None, None, ), # 17
 (18, TType.I32, 'ownerType', None, 1, ), # 18
+(19, TType.I64, 'txnId', None, -1, ), # 19
+(20, TType.STRING, 'validWriteIdList', None, None, ), # 20
+(21, TType.I32, 'isStatsCompliant', None, None, ), # 21
   )
 
-  def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[18][4],):
+  def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[18][4], txnId=thrift_spec[19][4], validWriteIdList=None, isStatsCompliant=None,):
 self.tableName = tableName
 self.dbName = dbName
 self.owner = owner
@@ -4593,6 +4616,9 @@ class Table:
 self.creationMetadata = creationMetadata
 self.catName = catName
 self.ownerType = ownerType
+self.txnId = txnId
+self.validWriteIdList = validWriteIdList
+self.isStatsCompliant = isStatsCompliant
 
   def read(self, iprot):
 if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is 
not None and fastbinary is not None:
@@ -4708,6 +4734,21 @@ class Table:
   self.ownerType = iprot.readI32()
 else:
   iprot.skip(ftype)
+  elif fid == 19:
+if ftype == TType.I64:
+  self.txnId = iprot.readI64()
+else:
+  iprot.skip(ftype)
+  elif fid == 20:
+if ftype == TType.STRING:
+  self.validWriteIdList = iprot.readString()
+else:
+  iprot.skip(ftype)
+  elif fid == 21:
+if ftype == TType.I32:
+  self.isStatsCompliant = iprot.readI32()
+else:
+  iprot.skip(ftype)
   else:
 iprot.skip(ftype)
   iprot.readFieldEnd()
@@ -4797,6 +4838,18 @@ class Table:
   oprot.writeFieldBegin('ownerType', TType.I32, 18)
   oprot.writeI32(self.ownerType)
   oprot.writeFieldEnd()
+if self.txnId is not None:
+  oprot.writeFieldBegin('txnId', TType.I64, 19)
+  oprot.writeI64(self.txnId)
+  oprot.writeFieldEnd()
+if self.validWriteIdList is not None:
+  oprot.writeFieldBegin('validWriteIdList', TType.STRING, 20)
+  oprot.writeString(self.validWriteIdList)
+  oprot.writeFieldEnd()
+if self.isStatsCompliant is not None:
+  oprot.writeFieldBegin('isStatsCompliant', TType.I32, 21)
+  oprot.writeI32(self.isStatsCompliant)
+  oprot.writeFieldEnd()
 oprot.writeFieldStop()
 oprot.writeStructEnd()
 
@@ -4824,6 +4877,9 @@ class Table:
 value = (value * 31) ^ hash(self.creationMetadata)
 value = (value * 31) ^ hash(self.catName)
 value = (value * 31) ^ hash(self.ownerType)
+value = (value * 31) ^ hash(self.txnId)
+value = (value * 31) ^ hash(self.validWriteIdList)
+value = (value * 31) ^ hash(self.isStatsCompliant)
 return value
 
   def __repr__(self):
@@ -4849,6 +4905,9 @@ class Partition:
- parameters
- privileges
- catName
+   - txnId
+   - validWriteIdList
+   - isStatsCompliant
   """
 
   thrift_spec = (
@@ -4862,9 +4921,12 @@ class Partition:
 (7, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, 
), # 7
 (8, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, 
PrincipalPrivilegeSet.thrift_spec), 

[34/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dd512593
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dd512593
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dd512593

Branch: refs/heads/master-txnstats
Commit: dd5125939b5b5ae652e39725cfcf2379e6cb0fea
Parents: 040c078
Author: Prasanth Jayachandran 
Authored: Sat Jun 16 11:23:55 2018 -0700
Committer: Prasanth Jayachandran 
Committed: Sat Jun 16 11:23:55 2018 -0700

--
 .../test/resources/testconfiguration.properties |3 +
 .../hive/llap/io/api/impl/LlapInputFormat.java  |6 +
 .../io/decode/GenericColumnVectorProducer.java  |9 +-
 .../llap/io/decode/OrcEncodedDataConsumer.java  |   40 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |   65 +-
 .../llap/io/encoded/SerDeEncodedDataReader.java |3 +
 .../io/encoded/VectorDeserializeOrcWriter.java  |   49 +-
 .../llap/io/metadata/ConsumerFileMetadata.java  |2 +
 .../hive/llap/io/metadata/OrcFileMetadata.java  |9 +-
 .../hadoop/hive/ql/exec/FetchOperator.java  |4 +-
 .../ql/exec/vector/VectorizationContext.java|7 +-
 .../vector/VectorizedInputFormatInterface.java  |1 +
 .../ql/exec/vector/VectorizedRowBatchCtx.java   |5 +
 .../VectorInBloomFilterColDynamicValue.java |1 +
 .../aggregates/VectorUDAFBloomFilter.java   |1 +
 .../ql/exec/vector/udf/VectorUDFAdaptor.java|8 +
 .../hadoop/hive/ql/io/BatchToRowReader.java |8 +-
 .../hadoop/hive/ql/io/NullRowsInputFormat.java  |6 +
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |8 +-
 .../hive/ql/io/orc/OrcRawRecordMerger.java  |   22 +-
 .../apache/hadoop/hive/ql/io/orc/Reader.java|   12 +-
 .../hadoop/hive/ql/io/orc/ReaderImpl.java   |9 +-
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java |   25 +-
 .../io/orc/VectorizedOrcAcidRowBatchReader.java |   24 +-
 .../ql/io/orc/VectorizedOrcInputFormat.java |8 +-
 .../hadoop/hive/ql/io/orc/WriterImpl.java   |   23 +-
 .../orc/encoded/EncodedTreeReaderFactory.java   |  205 +++-
 .../ql/io/parquet/MapredParquetInputFormat.java |6 +
 .../hive/ql/optimizer/physical/Vectorizer.java  |  115 +-
 .../hive/ql/io/orc/TestInputOutputFormat.java   |   81 +-
 .../hive/ql/io/orc/TestOrcRawRecordMerger.java  |   10 +-
 .../hive/ql/io/orc/TestVectorizedORCReader.java |3 +-
 .../TestVectorizedOrcAcidRowBatchReader.java|2 +-
 .../queries/clientpositive/explainanalyze_3.q   |2 +-
 ql/src/test/queries/clientpositive/llap_acid2.q |   31 +-
 .../clientpositive/llap_decimal64_reader.q  |   54 +
 .../queries/clientpositive/llap_uncompressed.q  |   13 +-
 ql/src/test/queries/clientpositive/orc_create.q |4 +-
 .../queries/clientpositive/orc_llap_counters.q  |6 +-
 .../queries/clientpositive/orc_llap_counters1.q |7 +-
 .../test/queries/clientpositive/orc_merge11.q   |8 +-
 ql/src/test/queries/clientpositive/orc_merge5.q |4 +-
 ql/src/test/queries/clientpositive/orc_merge6.q |4 +-
 ql/src/test/queries/clientpositive/orc_merge7.q |4 +-
 .../clientpositive/orc_merge_incompat1.q|4 +-
 .../clientpositive/orc_merge_incompat2.q|4 +-
 .../test/queries/clientpositive/orc_ppd_basic.q |7 +-
 .../clientpositive/orc_ppd_schema_evol_3a.q |6 +-
 .../clientpositive/orc_schema_evolution_float.q |2 +
 .../clientpositive/orc_split_elimination.q  |4 +-
 .../schema_evol_orc_nonvec_part_all_primitive.q |2 +
 ...evol_orc_nonvec_part_all_primitive_llap_io.q |2 +
 .../schema_evol_orc_vec_part_all_primitive.q|2 +
 ...ma_evol_orc_vec_part_all_primitive_llap_io.q |2 +
 .../clientpositive/type_change_test_int.q   |3 +
 .../type_change_test_int_vectorized.q   |2 +
 .../queries/clientpositive/vector_case_when_1.q |   22 +-
 .../queries/clientpositive/vector_decimal_5.q   |3 +-
 .../clientpositive/vector_decimal_mapjoin.q |6 +
 .../vectorized_dynamic_semijoin_reduction2.q|2 +-
 .../clientpositive/llap/acid_no_buckets.q.out   |   32 +-
 .../llap/acid_vectorization_original.q.out  |   14 +-
 .../llap/enforce_constraint_notnull.q.out   |   14 +-
 .../results/clientpositive/llap/llap_acid.q.out |   12 +-
 .../clientpositive/llap/llap_acid2.q.out|  302 +++--
 .../clientpositive/llap/llap_acid_fast.q.out|   12 +-
 .../llap/llap_decimal64_reader.q.out|  303 +
 .../clientpositive/llap/llap_partitioned.q.out  |   11 +-
 .../results/clientpositive/llap/llap_text.q.out | 1082 ++
 .../clientpositive/llap/llap_uncompressed.q.out |  283 +
 .../llap/llap_vector_nohybridgrace.q.out|   16 +-
 .../llap/materialized_view_create.q.out |6 +-
 .../materialized_view_create_rewrite_5.q.out|

[37/67] [abbrv] hive git commit: HIVE-19569: alter table db1.t1 rename db2.t2 generates MetaStoreEventListener.onDropTable() (Mahesh Kumar Behera, reviewed by Sankar Hariappan)

2018-06-18 Thread sershe
HIVE-19569: alter table db1.t1 rename db2.t2 generates MetaStoreEventListener.onDropTable() (Mahesh Kumar Behera, reviewed by Sankar Hariappan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d60bc73a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d60bc73a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d60bc73a

Branch: refs/heads/master-txnstats
Commit: d60bc73afcc6e755d499976baa54661a9680ed54
Parents: 3eaca1f
Author: Sankar Hariappan 
Authored: Sat Jun 16 23:27:24 2018 -0700
Committer: Sankar Hariappan 
Committed: Sat Jun 16 23:27:24 2018 -0700

--
 .../hadoop/hive/ql/TestTxnConcatenate.java  | 24 ++---
 .../hadoop/hive/metastore/HiveAlterHandler.java | 92 
 .../hadoop/hive/metastore/HiveMetaStore.java| 31 +++
 .../TestTablesCreateDropAlterTruncate.java  |  2 +-
 4 files changed, 45 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d60bc73a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
index 511198a..0e436e1 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -225,14 +224,19 @@ public class TestTxnConcatenate extends 
TxnCommandsBaseForTests {
 Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
 "select count(*) from NEXT_WRITE_ID where NWI_TABLE='s'"));
 
-//this causes MetaStoreEvenListener.onDropTable()/onCreateTable() to 
execute and the data
-//files are just moved under new table.  This can't work since a drop 
table in Acid removes
-//the relevant table metadata (like writeid, etc.), so writeIds in file 
names/ROW_IDs
-//no longer make sense.  (In fact 'select ...' returns nothing since there 
is no NEXT_WRITE_ID
-//entry for the 'new' table and all existing data is 'above HWM'. see 
HIVE-19569
-CommandProcessorResponse cpr =
-runStatementOnDriverNegative("alter table mydb1.S RENAME TO 
mydb2.bar");
-Assert.assertTrue(cpr.getErrorMessage() != null && cpr.getErrorMessage()
-.contains("Changing database name of a transactional table mydb1.s is 
not supported."));
+runStatementOnDriver("alter table mydb1.S RENAME TO mydb2.bar");
+
+Assert.assertEquals(
+TxnDbUtil.queryToString(hiveConf, "select * from 
COMPLETED_TXN_COMPONENTS"), 2,
+TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from COMPLETED_TXN_COMPONENTS where 
CTC_TABLE='bar'"));
+Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from COMPACTION_QUEUE where CQ_TABLE='bar'"));
+Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from WRITE_SET where WS_TABLE='bar'"));
+Assert.assertEquals(2, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from TXN_TO_WRITE_ID where T2W_TABLE='bar'"));
+Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from NEXT_WRITE_ID where NWI_TABLE='bar'"));
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/d60bc73a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 33999d0..c2da6d3 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -23,11 +23,8 @@ import com.google.common.collect.Lists;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.CreateTableEvent;

[50/67] [abbrv] hive git commit: HIVE-19853: Arrow serializer needs to create a TimeStampMicroTZVector instead of TimeStampMicroVector (Teddy Choi, reviewed by Matt McCline)

2018-06-18 Thread sershe
HIVE-19853: Arrow serializer needs to create a TimeStampMicroTZVector instead of TimeStampMicroVector (Teddy Choi, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1a610cc5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1a610cc5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1a610cc5

Branch: refs/heads/master-txnstats
Commit: 1a610cc545d39b9e9116c5b90108197853d0364c
Parents: c4eb647
Author: Matt McCline 
Authored: Mon Jun 18 15:55:00 2018 -0500
Committer: Matt McCline 
Committed: Mon Jun 18 15:55:00 2018 -0500

--
 .../hadoop/hive/ql/io/arrow/Deserializer.java   | 94 +++-
 .../hadoop/hive/ql/io/arrow/Serializer.java | 15 ++--
 2 files changed, 40 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1a610cc5/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
index 6e09d39..edc4b39 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
@@ -29,9 +29,7 @@ import org.apache.arrow.vector.IntVector;
 import org.apache.arrow.vector.IntervalDayVector;
 import org.apache.arrow.vector.IntervalYearVector;
 import org.apache.arrow.vector.SmallIntVector;
-import org.apache.arrow.vector.TimeStampMicroVector;
-import org.apache.arrow.vector.TimeStampMilliVector;
-import org.apache.arrow.vector.TimeStampNanoVector;
+import org.apache.arrow.vector.TimeStampVector;
 import org.apache.arrow.vector.TinyIntVector;
 import org.apache.arrow.vector.VarBinaryVector;
 import org.apache.arrow.vector.VarCharVector;
@@ -268,35 +266,11 @@ class Deserializer {
 }
 break;
   case TIMESTAMPMILLI:
-{
-  for (int i = 0; i < size; i++) {
-if (arrowVector.isNull(i)) {
-  VectorizedBatchUtil.setNullColIsNullValue(hiveVector, i);
-} else {
-  hiveVector.isNull[i] = false;
-
-  // Time = second + sub-second
-  final long timeInMillis = ((TimeStampMilliVector) 
arrowVector).get(i);
-  final TimestampColumnVector timestampColumnVector = 
(TimestampColumnVector) hiveVector;
-  int subSecondInNanos = (int) ((timeInMillis % MILLIS_PER_SECOND) 
* NS_PER_MILLIS);
-  long second = timeInMillis / MILLIS_PER_SECOND;
-
-  // A nanosecond value should not be negative
-  if (subSecondInNanos < 0) {
-
-// So add one second to the negative nanosecond value to make 
it positive
-subSecondInNanos += NS_PER_SECOND;
-
-// Subtract one second from the second value because we added 
one second
-second -= 1;
-  }
-  timestampColumnVector.time[i] = second * MILLIS_PER_SECOND;
-  timestampColumnVector.nanos[i] = subSecondInNanos;
-}
-  }
-}
-break;
+  case TIMESTAMPMILLITZ:
   case TIMESTAMPMICRO:
+  case TIMESTAMPMICROTZ:
+  case TIMESTAMPNANO:
+  case TIMESTAMPNANOTZ:
 {
   for (int i = 0; i < size; i++) {
 if (arrowVector.isNull(i)) {
@@ -305,40 +279,36 @@ class Deserializer {
   hiveVector.isNull[i] = false;
 
   // Time = second + sub-second
-  final long timeInMicros = ((TimeStampMicroVector) 
arrowVector).get(i);
-  final TimestampColumnVector timestampColumnVector = 
(TimestampColumnVector) hiveVector;
-  int subSecondInNanos = (int) ((timeInMicros % MICROS_PER_SECOND) 
* NS_PER_MICROS);
-  long second = timeInMicros / MICROS_PER_SECOND;
-
-  // A nanosecond value should not be negative
-  if (subSecondInNanos < 0) {
-
-// So add one second to the negative nanosecond value to make 
it positive
-subSecondInNanos += NS_PER_SECOND;
-
-// Subtract one second from the second value because we added 
one second
-second -= 1;
+  final long time = ((TimeStampVector) arrowVector).get(i);
+  long second;
+  int subSecondInNanos;
+  switch (minorType) {
+case TIMESTAMPMILLI:
+case TIMESTAMPMILLITZ:
+  {
+subSecondInNanos = (int) ((time % MILLIS_PER_SECOND) * 
NS_PER_MILLIS);
+second = time / MILLIS_PER_SECOND;
+  }
+  break;
+case TIMESTAMPMICROTZ:
+case 

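Note: the collapsed TIMESTAMP* cases above all reduce to the normalization the removed comments describe — split the epoch offset read from the Arrow vector into whole seconds plus a non-negative sub-second value in nanoseconds, borrowing a second when the remainder is negative. A minimal, self-contained sketch of that arithmetic for the millisecond case (constant names mirror the diff; the micro/nano cases differ only in the divisor constants). This is an illustration, not the Hive code:

    public class TimestampSplitSketch {
      public static void main(String[] args) {
        final long MILLIS_PER_SECOND = 1000L;
        final long NS_PER_MILLIS = 1000000L;
        final long NS_PER_SECOND = 1000000000L;

        long timeInMillis = -1500L;  // 1.5 seconds before the epoch
        long second = timeInMillis / MILLIS_PER_SECOND;                                    // -1
        int subSecondInNanos = (int) ((timeInMillis % MILLIS_PER_SECOND) * NS_PER_MILLIS); // -500000000

        // TimestampColumnVector expects a non-negative nanos field, so borrow one second.
        if (subSecondInNanos < 0) {
          subSecondInNanos += NS_PER_SECOND;  // 500000000
          second -= 1;                        // -2
        }
        // -2 s + 500000000 ns == -1.5 s: same instant, nanos kept non-negative.
        System.out.println(second + " s + " + subSecondInNanos + " ns");
      }
    }
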
[44/67] [abbrv] hive git commit: HIVE-19725: Add ability to dump non-native tables in replication metadata dump (Mahesh Kumar Behera, reviewed by Sankar Hariappan)

2018-06-18 Thread sershe
HIVE-19725: Add ability to dump non-native tables in replication metadata dump (Mahesh Kumar Behera, reviewed by Sankar Hariappan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6a16a71c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6a16a71c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6a16a71c

Branch: refs/heads/master-txnstats
Commit: 6a16a71ce99ff5d2f7bfa69cfcb475d4adc9873f
Parents: 4ec256c
Author: Sankar Hariappan 
Authored: Mon Jun 18 06:23:41 2018 -0700
Committer: Sankar Hariappan 
Committed: Mon Jun 18 06:23:41 2018 -0700

--
 .../hadoop/hive/ql/parse/TestExportImport.java  | 44 +++-
 ...TestReplicationScenariosAcrossInstances.java | 29 -
 .../hadoop/hive/ql/parse/repl/dump/Utils.java   |  3 +-
 3 files changed, 73 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/6a16a71c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
index 67b74c2..53d13d8 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
@@ -30,9 +30,12 @@ import org.junit.Test;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import java.io.IOException;
 import java.util.HashMap;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 public class TestExportImport {
 
@@ -122,4 +125,43 @@ public class TestExportImport {
 .verifyResults(new String[] { "1", "2" });
 
   }
+
+  @Test
+  public void testExportNonNativeTable() throws Throwable {
+String path = "hdfs:///tmp/" + dbName + "/";
+String exportPath = path + "1/";
+String exportMetaPath = exportPath + "/Meta";
+String tableName =  testName.getMethodName();
+String createTableQuery =
+"CREATE TABLE " + tableName + " ( serde_id bigint COMMENT 'from 
deserializer', name string "
++ "COMMENT 'from deserializer', slib string COMMENT 'from 
deserializer') "
++ "ROW FORMAT SERDE 
'org.apache.hive.storage.jdbc.JdbcSerDe' "
++ "STORED BY 
'org.apache.hive.storage.jdbc.JdbcStorageHandler' "
++ "WITH SERDEPROPERTIES ('serialization.format'='1') "
++ "TBLPROPERTIES ( "
++ "'hive.sql.database.type'='METASTORE', "
++ "'hive.sql.query'='SELECT \"SERDE_ID\", \"NAME\", 
\"SLIB\" FROM \"SERDES\"')";
+
+srcHiveWarehouse.run("use " + dbName)
+.run(createTableQuery)
+.runFailure("export table " + tableName + " to '" + exportPath + 
"'")
+.run("export table " + tableName + " to '" + exportMetaPath + "'" 
+ " for metadata replication('1')");
+
+destHiveWarehouse.run("use " + replDbName)
+.runFailure("import table " +  tableName + " from '" + exportPath 
+ "'")
+.run("show tables")
+.verifyFailure(new String[] {tableName})
+.run("import table " + tableName + " from '" + exportMetaPath + 
"'")
+.run("show tables")
+.verifyResult(tableName);
+
+// check physical path
+Path checkPath = new Path(exportPath);
+checkPath = new Path(checkPath, EximUtil.DATA_PATH_NAME);
+FileSystem fs = checkPath.getFileSystem(srcHiveWarehouse.hiveConf);
+assertFalse(fs.exists(checkPath));
+checkPath = new Path(exportMetaPath);
+checkPath = new Path(checkPath, EximUtil.METADATA_NAME);
+assertTrue(fs.exists(checkPath));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/6a16a71c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
index 26e308c..0f67174 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
@@ -797,7 +797,7 @@ public class TestReplicationScenariosAcrossInstances {
   }
 
   @Test
-  public 

[28/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out 
b/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
index ad8aef0..b8ea5cf 100644
--- a/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
@@ -80,11 +80,13 @@ STORED AS ORC tblproperties("orc.row.index.stride" = 
"1000", "orc.bloom.filter.c
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc_ppd_staging_n1
-PREHOOK: query: insert overwrite table orc_ppd_staging_n1 select t, si, i, b, 
f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), 
`dec`, bin from staging_n7 order by t, s
+PREHOOK: query: insert overwrite table orc_ppd_staging_n1 select t, si, i, b, 
f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from 
staging_n7 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
 PREHOOK: type: QUERY
 PREHOOK: Input: default@staging_n7
 PREHOOK: Output: default@orc_ppd_staging_n1
-POSTHOOK: query: insert overwrite table orc_ppd_staging_n1 select t, si, i, b, 
f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), 
`dec`, bin from staging_n7 order by t, s
+POSTHOOK: query: insert overwrite table orc_ppd_staging_n1 select t, si, i, b, 
f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from 
staging_n7 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@staging_n7
 POSTHOOK: Output: default@orc_ppd_staging_n1
@@ -177,11 +179,13 @@ STORED AS ORC tblproperties("orc.row.index.stride" = 
"1000", "orc.bloom.filter.c
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc_ppd_n2
-PREHOOK: query: insert overwrite table orc_ppd_n2 select t, si, i, b, f, d, 
bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from 
orc_ppd_staging_n1 order by t, s
+PREHOOK: query: insert overwrite table orc_ppd_n2 select t, si, i, b, f, d, 
bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging_n1 order by 
t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
 PREHOOK: type: QUERY
 PREHOOK: Input: default@orc_ppd_staging_n1
 PREHOOK: Output: default@orc_ppd_n2
-POSTHOOK: query: insert overwrite table orc_ppd_n2 select t, si, i, b, f, d, 
bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from 
orc_ppd_staging_n1 order by t, s
+POSTHOOK: query: insert overwrite table orc_ppd_n2 select t, si, i, b, f, d, 
bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging_n1 order by 
t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orc_ppd_staging_n1
 POSTHOOK: Output: default@orc_ppd_n2
@@ -203,7 +207,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@orc_ppd_n2
 PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
-   HDFS_BYTES_READ: 16676
+   HDFS_BYTES_READ: 16675
HDFS_BYTES_WRITTEN: 104
HDFS_READ_OPS: 7
HDFS_LARGE_READ_OPS: 0
@@ -895,7 +899,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@orc_ppd_n2
 PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
-   HDFS_BYTES_READ: 5691
+   HDFS_BYTES_READ: 5911
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -916,9 +920,9 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_TS_0: 1000
 Stage-1 LLAP IO COUNTERS:
ALLOCATED_BYTES: 1310720
-   ALLOCATED_USED_BYTES: 13796
+   ALLOCATED_USED_BYTES: 13810
CACHE_HIT_BYTES: 24
-   CACHE_MISS_BYTES: 5691
+   CACHE_MISS_BYTES: 5911
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
NUM_VECTOR_BATCHES: 1
@@ -955,7 +959,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 6
RECORDS_OUT_OPERATOR_TS_0: 1000
 Stage-1 LLAP IO COUNTERS:
-   CACHE_HIT_BYTES: 5715
+   CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -993,7 +997,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 6
RECORDS_OUT_OPERATOR_TS_0: 1000
 Stage-1 LLAP IO COUNTERS:
-   CACHE_HIT_BYTES: 5715
+   CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1053,7 +1057,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 2100
RECORDS_OUT_OPERATOR_TS_0: 2100
 Stage-1 LLAP IO COUNTERS:
-   CACHE_HIT_BYTES: 5715
+   CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
@@ -1091,7 +1095,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 0
RECORDS_OUT_OPERATOR_TS_0: 0
 Stage-1 LLAP IO COUNTERS:
-   CACHE_HIT_BYTES: 1735
+   CACHE_HIT_BYTES: 

[36/67] [abbrv] hive git commit: HIVE-19366: Vectorization causing TestStreaming.testStreamBucketingMatchesRegularBucketing to fail (Prasanth Jayachandran reviewed by Eugene Koifman)

2018-06-18 Thread sershe
HIVE-19366: Vectorization causing TestStreaming.testStreamBucketingMatchesRegularBucketing to fail (Prasanth Jayachandran reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3eaca1f4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3eaca1f4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3eaca1f4

Branch: refs/heads/master-txnstats
Commit: 3eaca1f4b29fcace7379675f530dfdc7434b862d
Parents: b100483
Author: Prasanth Jayachandran 
Authored: Sat Jun 16 20:54:31 2018 -0700
Committer: Prasanth Jayachandran 
Committed: Sat Jun 16 20:54:31 2018 -0700

--
 .../src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3eaca1f4/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
--
diff --git 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
 
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index 13aa5e9..5e5bc83 100644
--- 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ 
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -439,8 +439,6 @@ public class TestStreaming {
 String tableLoc  = "'" + dbUri + Path.SEPARATOR + "streamedtable" + "'";
 String tableLoc2 = "'" + dbUri + Path.SEPARATOR + "finaltable" + "'";
 String tableLoc3 = "'" + dbUri + Path.SEPARATOR + "nobucket" + "'";
-// disabling vectorization as this test yields incorrect results with 
vectorization
-conf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
 try (IDriver driver = DriverFactory.newDriver(conf)) {
   runDDL(driver, "create database testBucketing3");
   runDDL(driver, "use testBucketing3");



[57/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 672ebf9..c6ce900 100644
--- 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -208,7 +208,7 @@ import org.slf4j.LoggerFactory;
 
 public void alter_partitions(String db_name, String tbl_name, 
List new_parts) throws InvalidOperationException, MetaException, 
org.apache.thrift.TException;
 
-public void alter_partitions_with_environment_context(String db_name, 
String tbl_name, List new_parts, EnvironmentContext 
environment_context) throws InvalidOperationException, MetaException, 
org.apache.thrift.TException;
+public AlterPartitionsResponse 
alter_partitions_with_environment_context(AlterPartitionsRequest req) throws 
InvalidOperationException, MetaException, org.apache.thrift.TException;
 
 public void alter_partition_with_environment_context(String db_name, 
String tbl_name, Partition new_part, EnvironmentContext environment_context) 
throws InvalidOperationException, MetaException, org.apache.thrift.TException;
 
@@ -626,7 +626,7 @@ import org.slf4j.LoggerFactory;
 
 public void alter_partitions(String db_name, String tbl_name, 
List new_parts, org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
 
-public void alter_partitions_with_environment_context(String db_name, 
String tbl_name, List new_parts, EnvironmentContext 
environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) 
throws org.apache.thrift.TException;
+public void 
alter_partitions_with_environment_context(AlterPartitionsRequest req, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
 
 public void alter_partition_with_environment_context(String db_name, 
String tbl_name, Partition new_part, EnvironmentContext environment_context, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
 
@@ -3401,33 +3401,33 @@ import org.slf4j.LoggerFactory;
   return;
 }
 
-public void alter_partitions_with_environment_context(String db_name, 
String tbl_name, List new_parts, EnvironmentContext 
environment_context) throws InvalidOperationException, MetaException, 
org.apache.thrift.TException
+public AlterPartitionsResponse 
alter_partitions_with_environment_context(AlterPartitionsRequest req) throws 
InvalidOperationException, MetaException, org.apache.thrift.TException
 {
-  send_alter_partitions_with_environment_context(db_name, tbl_name, 
new_parts, environment_context);
-  recv_alter_partitions_with_environment_context();
+  send_alter_partitions_with_environment_context(req);
+  return recv_alter_partitions_with_environment_context();
 }
 
-public void send_alter_partitions_with_environment_context(String db_name, 
String tbl_name, List new_parts, EnvironmentContext 
environment_context) throws org.apache.thrift.TException
+public void 
send_alter_partitions_with_environment_context(AlterPartitionsRequest req) 
throws org.apache.thrift.TException
 {
   alter_partitions_with_environment_context_args args = new 
alter_partitions_with_environment_context_args();
-  args.setDb_name(db_name);
-  args.setTbl_name(tbl_name);
-  args.setNew_parts(new_parts);
-  args.setEnvironment_context(environment_context);
+  args.setReq(req);
   sendBase("alter_partitions_with_environment_context", args);
 }
 
-public void recv_alter_partitions_with_environment_context() throws 
InvalidOperationException, MetaException, org.apache.thrift.TException
+public AlterPartitionsResponse 
recv_alter_partitions_with_environment_context() throws 
InvalidOperationException, MetaException, org.apache.thrift.TException
 {
   alter_partitions_with_environment_context_result result = new 
alter_partitions_with_environment_context_result();
   receiveBase(result, "alter_partitions_with_environment_context");
+  if (result.isSetSuccess()) {
+return result.success;
+  }
   if (result.o1 != null) {
 throw result.o1;
   }
   if (result.o2 != null) {
 throw result.o2;
   }
-  return;
+  throw new 
org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT,
 "alter_partitions_with_environment_context failed: unknown result");
 }
 
 public void 

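Note: the generated client above captures the shape of this change — the four positional arguments of alter_partitions_with_environment_context are folded into a single AlterPartitionsRequest, and the call now returns an AlterPartitionsResponse instead of void. A hedged sketch of a caller after the change; the setter names on the request object are assumptions modeled on the old parameter names, not taken from the diff:

    // Sketch only: AlterPartitionsRequest setters (setDbName, setTableName,
    // setPartitions, setEnvironmentContext) are assumed here.
    AlterPartitionsRequest req = new AlterPartitionsRequest();
    req.setDbName(dbName);
    req.setTableName(tblName);
    req.setPartitions(newParts);
    req.setEnvironmentContext(environmentContext);

    // Previously: client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext);
    AlterPartitionsResponse resp = client.alter_partitions_with_environment_context(req);
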
[48/67] [abbrv] hive git commit: HIVE-19786: RpcServer cancelTask log message is incorrect (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar)

2018-06-18 Thread sershe
HIVE-19786: RpcServer cancelTask log message is incorrect (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4810511d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4810511d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4810511d

Branch: refs/heads/master-txnstats
Commit: 4810511d6e2b4377b20d70122788d5ad300d8df1
Parents: 8c07676
Author: Bharathkrishna Guruvayoor Murali 
Authored: Mon Jun 18 10:17:11 2018 -0500
Committer: Sahil Takiar 
Committed: Mon Jun 18 10:17:11 2018 -0500

--
 .../src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4810511d/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java
--
diff --git 
a/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java 
b/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java
index f1383d6..babcb54 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java
@@ -101,7 +101,8 @@ public class RpcServer implements Closeable {
 Runnable cancelTask = new Runnable() {
 @Override
 public void run() {
-  LOG.warn("Timed out waiting for test message from Remote 
Spark driver.");
+  LOG.warn("Timed out waiting for the completion of SASL 
negotiation "
+  + "between HiveServer2 and the Remote Spark 
Driver.");
   newRpc.close();
 }
 };



[52/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index da41e6e..626e103 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -791,6 +791,50 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient, AutoCloseable {
   }
 
   @Override
+  public Map> getPartitionColumnStatistics(
+  String dbName, String tableName, List partNames, List 
colNames,
+  long txnId, String validWriteIdList)
+  throws NoSuchObjectException, MetaException, TException {
+return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, 
tableName,
+partNames, colNames, txnId, validWriteIdList);
+  }
+
+  @Override
+  public Map> getPartitionColumnStatistics(
+  String catName, String dbName, String tableName, List partNames,
+  List colNames, long txnId, String validWriteIdList)
+  throws NoSuchObjectException, MetaException, TException {
+PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, 
tableName, colNames,
+partNames);
+rqst.setCatName(catName);
+rqst.setTxnId(txnId);
+rqst.setValidWriteIdList(validWriteIdList);
+return client.get_partitions_statistics_req(rqst).getPartStats();
+  }
+
+  @Override
+  public AggrStats getAggrColStatsFor(String dbName, String tblName, 
List colNames,
+  List partNames, long txnId, String writeIdList)
+  throws NoSuchObjectException, MetaException, TException {
+return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, 
colNames,
+partNames, txnId, writeIdList);  }
+
+  @Override
+  public AggrStats getAggrColStatsFor(String catName, String dbName, String 
tblName, List colNames,
+  List partNames, long txnId, String writeIdList)
+  throws NoSuchObjectException, MetaException, TException {
+if (colNames.isEmpty() || partNames.isEmpty()) {
+  LOG.debug("Columns is empty or partNames is empty : Short-circuiting 
stats eval on client side.");
+  return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate
+}
+PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, 
colNames, partNames);
+req.setCatName(catName);
+req.setTxnId(txnId);
+req.setValidWriteIdList(writeIdList);
+return client.get_aggr_stats_for(req);
+  }
+
+  @Override
   public List exchange_partitions(Map 
partitionSpecs, String sourceCat,
  String sourceDb, String 
sourceTable, String destCat,
  String destDb, String 
destTableName) throws TException {
@@ -1584,6 +1628,14 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient, AutoCloseable {
   }
 
   @Override
+  public Table getTable(String dbname, String name,
+ long txnId, String validWriteIdList)
+  throws MetaException, TException, NoSuchObjectException{
+return getTable(getDefaultCatalog(conf), dbname, name,
+txnId, validWriteIdList);
+  };
+
+  @Override
   public Table getTable(String catName, String dbName, String tableName) 
throws TException {
 GetTableRequest req = new GetTableRequest(dbName, tableName);
 req.setCatName(catName);
@@ -1593,6 +1645,18 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient, AutoCloseable {
   }
 
   @Override
+  public Table getTable(String catName, String dbName, String tableName,
+long txnId, String validWriteIdList) throws TException {
+GetTableRequest req = new GetTableRequest(dbName, tableName);
+req.setCatName(catName);
+req.setCapabilities(version);
+req.setTxnId(txnId);
+req.setValidWriteIdList(validWriteIdList);
+Table t = client.get_table_req(req).getTable();
+return deepCopy(filterHook.filterTable(t));
+  }
+
+  @Override
   public List getTableObjectsByName(String dbName, List 
tableNames)
   throws TException {
 return getTableObjectsByName(getDefaultCatalog(conf), dbName, tableNames);
@@ -1821,21 +1885,42 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient, AutoCloseable {
   @Override
   public void alter_partitions(String dbName, String tblName, List 
newParts)
   throws TException {
-alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, null);
+alter_partitions(
+getDefaultCatalog(conf), dbName, tblName, newParts, null, -1, null);
   }
 
   @Override
   public void alter_partitions(String dbName, String tblName, List 
newParts,
EnvironmentContext 

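Note: the HiveMetaStoreClient additions above thread a transaction id and a valid-write-id list through getTable and the partition/column statistics calls. The archive has stripped the generic type parameters from the quoted code; the sketch below restores plausible ones (List<String>, Map<String, List<ColumnStatisticsObj>>) as an assumption and shows a hypothetical caller of the new overloads:

    // Hypothetical usage; identifiers and the write-id list value are placeholders,
    // and the usual metastore imports (Table, ColumnStatisticsObj, java.util.*) are assumed.
    long txnId = 42L;
    String validWriteIdList = "...";  // snapshot of valid write ids for the table

    // Table fetched under the caller's transactional snapshot.
    Table t = msClient.getTable("db1", "t1", txnId, validWriteIdList);

    // Column statistics for selected partitions, filtered by the same snapshot.
    Map<String, List<ColumnStatisticsObj>> stats =
        msClient.getPartitionColumnStatistics(
            "db1", "t1",
            Arrays.asList("ds=2018-06-18"),   // partition names
            Arrays.asList("key", "value"),    // column names
            txnId, validWriteIdList);
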
[59/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
index 51f809a..5b40d2f 100644
--- 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
+++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
@@ -47,6 +47,9 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC 
= new org.apache.thrift.protocol.TField("parameters", 
org.apache.thrift.protocol.TType.MAP, (short)7);
   private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC 
= new org.apache.thrift.protocol.TField("privileges", 
org.apache.thrift.protocol.TType.STRUCT, (short)8);
   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = 
new org.apache.thrift.protocol.TField("catName", 
org.apache.thrift.protocol.TType.STRING, (short)9);
+  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = 
new org.apache.thrift.protocol.TField("txnId", 
org.apache.thrift.protocol.TType.I64, (short)10);
+  private static final org.apache.thrift.protocol.TField 
VALID_WRITE_ID_LIST_FIELD_DESC = new 
org.apache.thrift.protocol.TField("validWriteIdList", 
org.apache.thrift.protocol.TType.STRING, (short)11);
+  private static final org.apache.thrift.protocol.TField 
IS_STATS_COMPLIANT_FIELD_DESC = new 
org.apache.thrift.protocol.TField("isStatsCompliant", 
org.apache.thrift.protocol.TType.I32, (short)12);
 
   private static final Map, SchemeFactory> schemes = 
new HashMap, SchemeFactory>();
   static {
@@ -63,6 +66,9 @@ import org.slf4j.LoggerFactory;
   private Map parameters; // required
   private PrincipalPrivilegeSet privileges; // optional
   private String catName; // optional
+  private long txnId; // optional
+  private String validWriteIdList; // optional
+  private IsolationLevelCompliance isStatsCompliant; // optional
 
   /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -74,7 +80,14 @@ import org.slf4j.LoggerFactory;
 SD((short)6, "sd"),
 PARAMETERS((short)7, "parameters"),
 PRIVILEGES((short)8, "privileges"),
-CAT_NAME((short)9, "catName");
+CAT_NAME((short)9, "catName"),
+TXN_ID((short)10, "txnId"),
+VALID_WRITE_ID_LIST((short)11, "validWriteIdList"),
+/**
+ * 
+ * @see IsolationLevelCompliance
+ */
+IS_STATS_COMPLIANT((short)12, "isStatsCompliant");
 
 private static final Map byName = new HashMap();
 
@@ -107,6 +120,12 @@ import org.slf4j.LoggerFactory;
   return PRIVILEGES;
 case 9: // CAT_NAME
   return CAT_NAME;
+case 10: // TXN_ID
+  return TXN_ID;
+case 11: // VALID_WRITE_ID_LIST
+  return VALID_WRITE_ID_LIST;
+case 12: // IS_STATS_COMPLIANT
+  return IS_STATS_COMPLIANT;
 default:
   return null;
   }
@@ -149,8 +168,9 @@ import org.slf4j.LoggerFactory;
   // isset id assignments
   private static final int __CREATETIME_ISSET_ID = 0;
   private static final int __LASTACCESSTIME_ISSET_ID = 1;
+  private static final int __TXNID_ISSET_ID = 2;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = 
{_Fields.PRIVILEGES,_Fields.CAT_NAME};
+  private static final _Fields optionals[] = 
{_Fields.PRIVILEGES,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> 
metaDataMap;
   static {
 Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -175,11 +195,19 @@ import org.slf4j.LoggerFactory;
 new 
org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
 PrincipalPrivilegeSet.class)));
 tmpMap.put(_Fields.CAT_NAME, new 
org.apache.thrift.meta_data.FieldMetaData("catName", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
 new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+tmpMap.put(_Fields.TXN_ID, new 
org.apache.thrift.meta_data.FieldMetaData("txnId", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new 
org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", 

[67/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
HIVE-19532 : 04 patch (Steve Yeom)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1d46608e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1d46608e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1d46608e

Branch: refs/heads/master-txnstats
Commit: 1d46608e89c26ed123e96c3b79ef59b50d2349a6
Parents: 1a610cc
Author: sergey 
Authored: Mon Jun 18 14:50:31 2018 -0700
Committer: sergey 
Committed: Mon Jun 18 14:51:14 2018 -0700

--
 .../listener/DummyRawStoreFailEvent.java|   45 +-
 pom.xml |2 +-
 .../hive/ql/exec/ColumnStatsUpdateTask.java |3 +
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |3 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |  110 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java|7 +
 .../hadoop/hive/ql/lockmgr/DummyTxnManager.java |1 +
 .../hadoop/hive/ql/lockmgr/HiveTxnManager.java  |1 -
 .../apache/hadoop/hive/ql/metadata/Hive.java|  297 +-
 .../hive/ql/optimizer/StatsOptimizer.java   |   56 +-
 .../hive/ql/stats/BasicStatsNoJobTask.java  |4 +-
 .../hadoop/hive/ql/stats/BasicStatsTask.java|   15 +-
 .../hadoop/hive/ql/stats/ColStatsProcessor.java |7 +
 .../test/queries/clientpositive/stats_nonpart.q |   53 +
 ql/src/test/queries/clientpositive/stats_part.q |   98 +
 .../test/queries/clientpositive/stats_part2.q   |  100 +
 .../test/queries/clientpositive/stats_sizebug.q |   37 +
 .../results/clientpositive/stats_nonpart.q.out  |  500 ++
 .../results/clientpositive/stats_part.q.out |  660 ++
 .../results/clientpositive/stats_part2.q.out| 1261 
 .../results/clientpositive/stats_sizebug.q.out  |  216 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 2426 ---
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h|   70 +-
 .../ThriftHiveMetastore_server.skeleton.cpp |2 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp | 6383 ++
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |  412 +-
 .../metastore/api/AddPartitionsRequest.java |  215 +-
 .../hive/metastore/api/AddPartitionsResult.java |  126 +-
 .../hadoop/hive/metastore/api/AggrStats.java|  124 +-
 .../metastore/api/AlterPartitionsRequest.java   |  966 +++
 .../metastore/api/AlterPartitionsResponse.java  |  283 +
 .../hive/metastore/api/ColumnStatistics.java|  335 +-
 .../hive/metastore/api/GetTableRequest.java |  219 +-
 .../hive/metastore/api/GetTableResult.java  |  124 +-
 .../metastore/api/IsolationLevelCompliance.java |   48 +
 .../hadoop/hive/metastore/api/Partition.java|  333 +-
 .../hive/metastore/api/PartitionSpec.java   |  337 +-
 .../metastore/api/PartitionsStatsRequest.java   |  219 +-
 .../metastore/api/PartitionsStatsResult.java|  124 +-
 .../api/SetPartitionsStatsRequest.java  |  215 +-
 .../apache/hadoop/hive/metastore/api/Table.java |  333 +-
 .../hive/metastore/api/TableStatsRequest.java   |  219 +-
 .../hive/metastore/api/TableStatsResult.java|  124 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 2553 ---
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1231 ++--
 .../src/gen/thrift/gen-php/metastore/Types.php  |  905 +++
 .../hive_metastore/ThriftHiveMetastore-remote   |8 +-
 .../hive_metastore/ThriftHiveMetastore.py   |  834 ++-
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  590 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |  162 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   27 +-
 .../hadoop/hive/metastore/AlterHandler.java |2 +-
 .../hadoop/hive/metastore/HiveAlterHandler.java |   20 +-
 .../hadoop/hive/metastore/HiveMetaStore.java|  108 +-
 .../hive/metastore/HiveMetaStoreClient.java |  118 +-
 .../hadoop/hive/metastore/IHMSHandler.java  |5 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   45 +-
 .../hadoop/hive/metastore/ObjectStore.java  |  487 +-
 .../apache/hadoop/hive/metastore/RawStore.java  |  150 +-
 .../hive/metastore/cache/CachedStore.java   |  140 +-
 .../hadoop/hive/metastore/model/MPartition.java |   18 +-
 .../model/MPartitionColumnStatistics.java   |9 +
 .../hadoop/hive/metastore/model/MTable.java |   19 +
 .../metastore/model/MTableColumnStatistics.java |9 +
 .../metastore/txn/CompactionTxnHandler.java |   66 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java|   94 +
 .../hadoop/hive/metastore/txn/TxnUtils.java |   20 +-
 .../src/main/resources/package.jdo  |   18 +
 .../main/sql/derby/hive-schema-3.0.0.derby.sql  |   11 +-
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |   10 +-
 .../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql  |8 +-
 .../main/sql/mssql/hive-schema-3.0.0.mssql.sql  |   14 +-
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |   14 +-
 .../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql  |8 +
 

[19/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out 
b/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
index f1db9af..d739408 100644
--- a/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
@@ -3372,8 +3372,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -3402,8 +3402,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -3485,8 +3485,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -3515,8 +3515,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -3600,8 +3600,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -3630,8 +3630,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -3710,8 +3710,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -3740,8 +3740,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -3828,8 +3828,8 @@ STAGE PLANS:
 Map Vectorization:
  

[46/67] [abbrv] hive git commit: HIVE-19602: Refactor inplace progress code in Hive-on-spark progress monitor to use ProgressMonitor instance (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar, Rui Li)

2018-06-18 Thread sershe
HIVE-19602: Refactor inplace progress code in Hive-on-spark progress monitor to use ProgressMonitor instance (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar, Rui Li)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c89cf6d5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c89cf6d5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c89cf6d5

Branch: refs/heads/master-txnstats
Commit: c89cf6d5de0343493dc629a0073b5c8e88359a6e
Parents: 3a6ad26
Author: Bharathkrishna Guruvayoor Murali 
Authored: Mon Jun 18 10:03:01 2018 -0500
Committer: Sahil Takiar 
Committed: Mon Jun 18 10:03:01 2018 -0500

--
 .../ql/exec/spark/status/SparkJobMonitor.java   | 166 +--
 .../exec/spark/status/SparkProgressMonitor.java | 155 +
 2 files changed, 160 insertions(+), 161 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/c89cf6d5/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
index e78b1cd..3531ac2 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
@@ -22,13 +22,9 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.common.log.InPlaceUpdate;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.fusesource.jansi.Ansi;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.PrintStream;
-import java.text.DecimalFormat;
-import java.text.NumberFormat;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.HashSet;
@@ -38,8 +34,6 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
 
-import static org.fusesource.jansi.Ansi.ansi;
-
 abstract class SparkJobMonitor {
 
   protected static final String CLASS_NAME = SparkJobMonitor.class.getName();
@@ -48,6 +42,7 @@ abstract class SparkJobMonitor {
   protected final PerfLogger perfLogger = SessionState.getPerfLogger();
   protected final int checkInterval = 1000;
   protected final long monitorTimeoutInterval;
+  private final InPlaceUpdate inPlaceUpdateFn;
 
   private final Set completed = new HashSet();
   private final int printInterval = 3000;
@@ -61,94 +56,20 @@ abstract class SparkJobMonitor {
 FINISHED
   }
 
-  // in-place progress update related variables
   protected final boolean inPlaceUpdate;
-  private int lines = 0;
-  private final PrintStream out;
-
-  private static final int COLUMN_1_WIDTH = 16;
-  private static final String HEADER_FORMAT = "%16s%10s %13s  %5s  %9s  %7s  
%7s  %6s  ";
-  private static final String STAGE_FORMAT = "%-16s%10s %13s  %5s  %9s  %7s  
%7s  %6s  ";
-  private static final String HEADER = String.format(HEADER_FORMAT,
-  "STAGES", "ATTEMPT", "STATUS", "TOTAL", "COMPLETED", "RUNNING", 
"PENDING", "FAILED");
-  private static final int SEPARATOR_WIDTH = 86;
-  private static final String SEPARATOR = new String(new 
char[SEPARATOR_WIDTH]).replace("\0", "-");
-  private static final String FOOTER_FORMAT = "%-15s  %-30s %-4s  %-25s";
-  private static final int progressBarChars = 30;
-
-  private final NumberFormat secondsFormat = new DecimalFormat("#0.00");
 
   protected SparkJobMonitor(HiveConf hiveConf) {
 monitorTimeoutInterval = hiveConf.getTimeVar(
 HiveConf.ConfVars.SPARK_JOB_MONITOR_TIMEOUT, TimeUnit.SECONDS);
 inPlaceUpdate = InPlaceUpdate.canRenderInPlace(hiveConf) && 
!SessionState.getConsole().getIsSilent();
 console = new SessionState.LogHelper(LOG);
-out = SessionState.LogHelper.getInfoStream();
+inPlaceUpdateFn = new 
InPlaceUpdate(SessionState.LogHelper.getInfoStream());
   }
 
   public abstract int startMonitor();
 
   private void printStatusInPlace(Map 
progressMap) {
-
-StringBuilder reportBuffer = new StringBuilder();
-
-// Num of total and completed tasks
-int sumTotal = 0;
-int sumComplete = 0;
-
-// position the cursor to line 0
-repositionCursor();
-
-// header
-reprintLine(SEPARATOR);
-reprintLineWithColorAsBold(HEADER, Ansi.Color.CYAN);
-reprintLine(SEPARATOR);
-
-SortedSet keys = new TreeSet(progressMap.keySet());
-int idx = 0;
-final int numKey = keys.size();
-for (SparkStage stage : keys) {
-  SparkStageProgress progress = progressMap.get(stage);
-  final int complete = progress.getSucceededTaskCount();
-  final int total = progress.getTotalTaskCount();
-  final int running = 

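Note: the block removed above is the hand-rolled in-place rendering (header/footer formats, progress bar) that this change pushes behind a ProgressMonitor implementation; SparkJobMonitor now only holds an InPlaceUpdate bound to the session's info stream. A rough sketch of the resulting delegation, where the SparkProgressMonitor constructor arguments and the InPlaceUpdate.render(...) call are assumptions inferred from the commit title and the constructor shown above:

    // Sketch only; constructor arguments and render() signature are assumed.
    private void printStatusInPlace(Map<SparkStage, SparkStageProgress> progressMap) {
      inPlaceUpdateFn.render(new SparkProgressMonitor(progressMap, startTime));
    }
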
[51/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index 4e3068d..f2642cf 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.metastore.txn;
 
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.classification.RetrySemantics;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -576,8 +577,8 @@ class CompactionTxnHandler extends TxnHandler {
 dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
 stmt = dbConn.createStatement();
 String s = "select txn_id from TXNS where " +
-  "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " +
-  "txn_state = '" + TXN_ABORTED + "'";
+"txn_id not in (select tc_txnid from TXN_COMPONENTS) and " +
+"txn_state = '" + TXN_ABORTED + "'";
 LOG.debug("Going to execute query <" + s + ">");
 rs = stmt.executeQuery(s);
 List txnids = new ArrayList<>();
@@ -587,10 +588,71 @@ class CompactionTxnHandler extends TxnHandler {
   return;
 }
 Collections.sort(txnids);//easier to read logs
+
 List queries = new ArrayList<>();
 StringBuilder prefix = new StringBuilder();
 StringBuilder suffix = new StringBuilder();
 
+// Turn off COLUMN_STATS_ACCURATE for txnids' components in TBLS and 
PARTITIONS
+for (Long txnId : txnids) {
+  // Get table ids for the current txnId.
+  s = "select tbl_id from TBLS where txn_id = " + txnId;
+  LOG.debug("Going to execute query <" + s + ">");
+  rs = stmt.executeQuery(s);
+  List tblIds = new ArrayList<>();
+  while (rs.next()) {
+tblIds.add(rs.getLong(1));
+  }
+  close(rs);
+  if(tblIds.size() <= 0) {
+continue;
+  }
+
+  // Update COLUMN_STATS_AcCURATE.BASIC_STATS to false for each 
tableId.
+  prefix.append("delete from TABLE_PARAMS " +
+  " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and ");
+  suffix.append("");
+  TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, 
tblIds, "tbl_id", true, false);
+
+  for (String query : queries) {
+LOG.debug("Going to execute update <" + query + ">");
+int rc = stmt.executeUpdate(query);
+LOG.info("Turned off " + rc + " COLUMN_STATE_ACCURATE.BASIC_STATS 
states from TBLS");
+  }
+
+  queries.clear();
+  prefix.setLength(0);
+  suffix.setLength(0);
+
+  // Get partition ids for the current txnId.
+  s = "select part_id from PARTITIONS where txn_id = " + txnId;
+  LOG.debug("Going to execute query <" + s + ">");
+  rs = stmt.executeQuery(s);
+  List ptnIds = new ArrayList<>();
+  while (rs.next()) ptnIds.add(rs.getLong(1));
+  close(rs);
+  if(ptnIds.size() <= 0) {
+continue;
+  }
+
+  // Update COLUMN_STATS_AcCURATE.BASIC_STATS to false for each ptnId.
+  prefix.append("delete from PARTITION_PARAMS " +
+  " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and ");
+  suffix.append("");
+  TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, 
ptnIds, "part_id", true, false);
+
+  for (String query : queries) {
+LOG.debug("Going to execute update <" + query + ">");
+int rc = stmt.executeUpdate(query);
+LOG.info("Turned off " + rc + " COLUMN_STATE_ACCURATE.BASIC_STATS 
states from PARTITIONS");
+  }
+
+  queries.clear();
+  prefix.setLength(0);
+  suffix.setLength(0);
+}
+
+// Delete from TXNS.
 prefix.append("delete from TXNS where ");
 suffix.append("");
 

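Note: the cleanup added above collects the table and partition ids written by each aborted transaction and deletes their COLUMN_STATS_ACCURATE entries, so statistics left behind by the aborted write are no longer advertised as accurate. TxnUtils.buildQueryWithINClause is what turns a long id list into a bounded set of IN (...) clauses; a simplified, stand-alone illustration of that batching idea follows (not the real utility, whose parameters and limits differ; java.util.List/ArrayList assumed imported):

    // Illustration only: emit deletes like
    //   delete from TABLE_PARAMS where param_key = 'COLUMN_STATS_ACCURATE' and tbl_id in (1,2,3)
    // batching ids so no single IN clause grows unbounded.
    static List<String> buildStatsInvalidationDeletes(List<Long> tblIds, int batchSize) {
      List<String> queries = new ArrayList<>();
      for (int i = 0; i < tblIds.size(); i += batchSize) {
        List<Long> chunk = tblIds.subList(i, Math.min(i + batchSize, tblIds.size()));
        StringBuilder sb = new StringBuilder(
            "delete from TABLE_PARAMS where param_key = 'COLUMN_STATS_ACCURATE' and tbl_id in (");
        for (int j = 0; j < chunk.size(); j++) {
          if (j > 0) {
            sb.append(',');
          }
          sb.append(chunk.get(j));
        }
        queries.add(sb.append(')').toString());
      }
      return queries;
    }
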
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 50bfca3..bfbd928 100644
--- 

[38/67] [abbrv] hive git commit: HIVE-19904 : Load data rewrite into Tez job fails for ACID (Deepak Jaiswal, reviewed by Eugene Koifman)

2018-06-18 Thread sershe
HIVE-19904 : Load data rewrite into Tez job fails for ACID (Deepak Jaiswal, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/24da4603
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/24da4603
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/24da4603

Branch: refs/heads/master-txnstats
Commit: 24da46034fb47aa6ece963b2d08ebc3c1362961e
Parents: d60bc73
Author: Deepak Jaiswal 
Authored: Sun Jun 17 10:49:48 2018 -0700
Committer: Deepak Jaiswal 
Committed: Sun Jun 17 10:49:48 2018 -0700

--
 .../hive/ql/parse/LoadSemanticAnalyzer.java |   2 +-
 .../apache/hadoop/hive/ql/TestTxnLoadData.java  |   6 +-
 .../llap/load_data_using_job.q.out  | 108 +--
 3 files changed, 58 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/24da4603/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 189975e..cbacd05 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -75,7 +75,7 @@ public class LoadSemanticAnalyzer extends SemanticAnalyzer {
   private static final Logger LOG = 
LoggerFactory.getLogger(LoadSemanticAnalyzer.class);
   private boolean queryReWritten = false;
 
-  private final String tempTblNameSuffix = "__TEMP_TABLE_FOR_LOAD_DATA__";
+  private final String tempTblNameSuffix = "__temp_table_for_load_data__";
 
   // AST specific data
   private Tree fromTree, tableTree;

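Note: the only change in LoadSemanticAnalyzer is lower-casing the temp-table suffix. Hive normalizes table names to lower case in the metastore, so the upper-case suffix presumably left the rewritten load's temp table referenced under two spellings; the q.out hunks below show the inputs and lineage settling on the lower-case form. A one-line sketch with a hypothetical base table:

    // Hypothetical: temp table generated for "LOAD DATA ... INTO TABLE srcbucket_mapjoin_n8"
    String tempTableName = "srcbucket_mapjoin_n8" + "__temp_table_for_load_data__";  // all lower case by construction
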
http://git-wip-us.apache.org/repos/asf/hive/blob/24da4603/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java 
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
index fb88f25..45f9e52 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
@@ -378,10 +378,10 @@ public class TestTxnLoadData extends 
TxnCommandsBaseForTests {
 runStatementOnDriver("create table Tstage (a int, b int) stored as orc 
tblproperties('transactional'='false')");
 //this creates an ORC data file with correct schema under table root
 runStatementOnDriver("insert into Tstage values(1,2),(3,4)");
-CommandProcessorResponse cpr = runStatementOnDriverNegative("load data 
local inpath '" + getWarehouseDir() + "' into table T");
-// This condition should not occur with the new support of rewriting load 
into IAS.
-Assert.assertFalse(cpr.getErrorMessage().contains("Load into bucketed 
tables are disabled"));
+// This will work with the new support of rewriting load into IAS.
+runStatementOnDriver("load data local inpath '" + getWarehouseDir() + 
"/Tstage' into table T");
   }
+
   private void checkExpected(List rs, String[][] expected, String msg) 
{
 super.checkExpected(rs, expected, msg, LOG, true);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/24da4603/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out 
b/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out
index 7a62be2..21fd933 100644
--- a/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out
+++ b/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out
@@ -240,14 +240,14 @@ STAGE PLANS:
 
 PREHOOK: query: load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin_n8
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+PREHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
 PREHOOK: Output: default@srcbucket_mapjoin_n8
 POSTHOOK: query: load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin_n8
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+POSTHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
 POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).key SIMPLE 
[(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key,
 type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).value 

[42/67] [abbrv] hive git commit: HIVE-19909: qtests: retire hadoop_major version specific tests; and logics (Zoltan Haindrich reviewed by Teddy Choi)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
--
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
index a5f5522..f933545 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
 set mapred.input.dir.recursive=true;   
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) 
 -- SORT_QUERY_RESULTS
 
 -- List bucketing query logic test case. We simulate the directory structure 
by DML here.

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
--
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
index 4020063..d5f6a26 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
 set mapred.input.dir.recursive=true;   
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) 
 
 -- List bucketing query logic test case. We simulate the directory structure 
by DML here.
 -- Test condition: 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
--
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
index 54ab75e..fc5815c 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
 set mapred.input.dir.recursive=true;   
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) 
 -- SORT_QUERY_RESULTS
 
 -- List bucketing query logic test case. 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
--
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
index 77974cf..bc4f96c 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
 set mapred.input.dir.recursive=true;   
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) 
 -- SORT_QUERY_RESULTS
 
 -- List bucketing query logic test case. 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
--
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
index bf6b227..64193f1 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
 set mapred.input.dir.recursive=true;   
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) 
 -- SORT_QUERY_RESULTS
 
 -- List bucketing query logic test case. 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/loadpart_err.q
--
diff --git a/ql/src/test/queries/clientpositive/loadpart_err.q 
b/ql/src/test/queries/clientpositive/loadpart_err.q
deleted file mode 100644
index 1204622..000
--- a/ql/src/test/queries/clientpositive/loadpart_err.q
+++ /dev/null
@@ -1,21 +0,0 @@
---! qt:dataset:src
-set hive.cli.errors.ignore=true;
-
-ADD FILE ../../data/scripts/error_script;
-
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S, 0.23)
--- (this test is flaky so it is currently disabled for all Hadoop versions)
-
-CREATE TABLE loadpart1(a STRING, b STRING) PARTITIONED BY (ds STRING);
-
-INSERT OVERWRITE TABLE loadpart1 PARTITION (ds='2009-01-01')
-SELECT TRANSFORM(src.key, src.value) USING 'error_script' AS (tkey, tvalue)
-FROM src;
-
-DESCRIBE loadpart1;
-SHOW PARTITIONS loadpart1;
-

[49/67] [abbrv] hive git commit: HIVE-19921: Fix perf duration and queue name in HiveProtoLoggingHook (Harish JP, reviewed by Anishek Agarwal)

2018-06-18 Thread sershe
HIVE-19921: Fix perf duration and queue name in HiveProtoLoggingHook (Harish 
JP, reviewed by Anishek Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c4eb647c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c4eb647c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c4eb647c

Branch: refs/heads/master-txnstats
Commit: c4eb647c6da499541ce178bf82433c26f25e
Parents: 4810511
Author: Anishek Agarwal 
Authored: Mon Jun 18 09:08:34 2018 -0700
Committer: Anishek Agarwal 
Committed: Mon Jun 18 09:08:34 2018 -0700

--
 .../hive/ql/hooks/HiveProtoLoggingHook.java |  6 +++-
 .../hive/ql/hooks/TestHiveProtoLoggingHook.java | 29 +++-
 2 files changed, 27 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/c4eb647c/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java 
b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
index eef6ac9..bddca1a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
@@ -366,6 +366,7 @@ public class HiveProtoLoggingHook implements 
ExecuteWithHookContext {
   ApplicationId llapId = determineLlapId(conf, executionMode);
   if (llapId != null) {
 addMapEntry(builder, OtherInfoType.LLAP_APP_ID, llapId.toString());
+
builder.setQueue(conf.get(HiveConf.ConfVars.LLAP_DAEMON_QUEUE_NAME.varname));
   }
 
   conf.stripHiddenConfigurations(conf);
@@ -391,7 +392,10 @@ public class HiveProtoLoggingHook implements 
ExecuteWithHookContext {
 builder.setOperationId(hookContext.getOperationId());
   }
   addMapEntry(builder, OtherInfoType.STATUS, Boolean.toString(success));
-  JSONObject perfObj = new 
JSONObject(hookContext.getPerfLogger().getEndTimes());
+  JSONObject perfObj = new JSONObject();
+  for (String key : hookContext.getPerfLogger().getEndTimes().keySet()) {
+perfObj.put(key, hookContext.getPerfLogger().getDuration(key));
+  }
   addMapEntry(builder, OtherInfoType.PERF, perfObj.toString());
 
   return builder.build();

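The event now carries per-call durations rather than raw end timestamps: the hook walks the perf-logger keys and records getDuration(key) for each. A self-contained sketch of that duration-map construction (PerfRecord and the sample values are stand-ins for the PerfLogger internals, not the real classes):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class PerfDurations {
      static final class PerfRecord {
        final long startMillis;
        final long endMillis;
        PerfRecord(long startMillis, long endMillis) {
          this.startMillis = startMillis;
          this.endMillis = endMillis;
        }
      }

      // For each perf key, record elapsed time (end - start), which is what the
      // hook now serializes into the PERF field instead of raw end times.
      static Map<String, Long> durations(Map<String, PerfRecord> perf) {
        Map<String, Long> out = new LinkedHashMap<>();
        for (Map.Entry<String, PerfRecord> e : perf.entrySet()) {
          out.put(e.getKey(), e.getValue().endMillis - e.getValue().startMillis);
        }
        return out;
      }

      public static void main(String[] args) {
        Map<String, PerfRecord> perf = new LinkedHashMap<>();
        perf.put("LogTest", new PerfRecord(1_000L, 1_042L));
        System.out.println(durations(perf)); // {LogTest=42}
      }
    }
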
http://git-wip-us.apache.org/repos/asf/hive/blob/c4eb647c/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java 
b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
index 98b73e8..96fb73c 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
@@ -22,6 +22,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.Map;
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -46,6 +47,9 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
 
 public class TestHiveProtoLoggingHook {
 
@@ -106,6 +110,8 @@ public class TestHiveProtoLoggingHook {
   @Test
   public void testPostEventLog() throws Exception {
 context.setHookType(HookType.POST_EXEC_HOOK);
+context.getPerfLogger().PerfLogBegin("test", "LogTest");
+context.getPerfLogger().PerfLogEnd("test", "LogTest");
 
 EventLogger evtLogger = new EventLogger(conf, SystemClock.getInstance());
 evtLogger.handle(context);
@@ -119,7 +125,11 @@ public class TestHiveProtoLoggingHook {
 Assert.assertEquals("test_op_id", event.getOperationId());
 
 assertOtherInfo(event, OtherInfoType.STATUS, Boolean.TRUE.toString());
-assertOtherInfo(event, OtherInfoType.PERF, null);
+String val = findOtherInfo(event, OtherInfoType.PERF);
+Map<String, Long> map = new ObjectMapper().readValue(val,
+new TypeReference<Map<String, Long>>() {});
+// This should be really close to zero.
+Assert.assertTrue("Expected LogTest in PERF", map.get("LogTest") < 100);
   }
 
   @Test
@@ -158,15 +168,20 @@ public class TestHiveProtoLoggingHook {
 return event;
   }
 
-  private void assertOtherInfo(HiveHookEventProto event, OtherInfoType key, 
String value) {
+  private String findOtherInfo(HiveHookEventProto event, OtherInfoType key) {
 for (MapFieldEntry otherInfo : event.getOtherInfoList()) {
   if (otherInfo.getKey().equals(key.name())) {
-if (value != null) {
-  Assert.assertEquals(value, 

[60/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
new file mode 100644
index 000..8d4102f
--- /dev/null
+++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
@@ -0,0 +1,966 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public class 
AlterPartitionsRequest implements 
org.apache.thrift.TBase<AlterPartitionsRequest, AlterPartitionsRequest._Fields>, java.io.Serializable, Cloneable, 
Comparable<AlterPartitionsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("AlterPartitionsRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = 
new org.apache.thrift.protocol.TField("dbName", 
org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC 
= new org.apache.thrift.protocol.TField("tableName", 
org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC 
= new org.apache.thrift.protocol.TField("partitions", 
org.apache.thrift.protocol.TType.LIST, (short)3);
+  private static final org.apache.thrift.protocol.TField 
ENVIRONMENT_CONTEXT_FIELD_DESC = new 
org.apache.thrift.protocol.TField("environmentContext", 
org.apache.thrift.protocol.TType.STRUCT, (short)4);
+  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = 
new org.apache.thrift.protocol.TField("txnId", 
org.apache.thrift.protocol.TType.I64, (short)5);
+  private static final org.apache.thrift.protocol.TField 
VALID_WRITE_ID_LIST_FIELD_DESC = new 
org.apache.thrift.protocol.TField("validWriteIdList", 
org.apache.thrift.protocol.TType.STRING, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = 
new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+schemes.put(StandardScheme.class, new 
AlterPartitionsRequestStandardSchemeFactory());
+schemes.put(TupleScheme.class, new 
AlterPartitionsRequestTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tableName; // required
+  private List<Partition> partitions; // required
+  private EnvironmentContext environmentContext; // required
+  private long txnId; // optional
+  private String validWriteIdList; // optional
+
+  /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+DB_NAME((short)1, "dbName"),
+TABLE_NAME((short)2, "tableName"),
+PARTITIONS((short)3, "partitions"),
+ENVIRONMENT_CONTEXT((short)4, "environmentContext"),
+TXN_ID((short)5, "txnId"),
+VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
+
+private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+static {
+  for (_Fields field : EnumSet.allOf(_Fields.class)) {
+byName.put(field.getFieldName(), field);
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, or null if its not 
found.
+ */
+public static _Fields findByThriftId(int fieldId) {
+  switch(fieldId) {
+case 1: // DB_NAME
+  return DB_NAME;
+case 2: // TABLE_NAME
+  return TABLE_NAME;
+case 3: // PARTITIONS
+  return PARTITIONS;
+

[64/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp 
b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index e459bc2..7a81dfb 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -2334,14 +2334,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size1202;
-::apache::thrift::protocol::TType _etype1205;
-xfer += iprot->readListBegin(_etype1205, _size1202);
-this->success.resize(_size1202);
-uint32_t _i1206;
-for (_i1206 = 0; _i1206 < _size1202; ++_i1206)
+uint32_t _size1221;
+::apache::thrift::protocol::TType _etype1224;
+xfer += iprot->readListBegin(_etype1224, _size1221);
+this->success.resize(_size1221);
+uint32_t _i1225;
+for (_i1225 = 0; _i1225 < _size1221; ++_i1225)
 {
-  xfer += iprot->readString(this->success[_i1206]);
+  xfer += iprot->readString(this->success[_i1225]);
 }
 xfer += iprot->readListEnd();
   }
@@ -2380,10 +2380,10 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter1207;
-  for (_iter1207 = this->success.begin(); _iter1207 != 
this->success.end(); ++_iter1207)
+  std::vector<std::string> ::const_iterator _iter1226;
+  for (_iter1226 = this->success.begin(); _iter1226 != 
this->success.end(); ++_iter1226)
   {
-xfer += oprot->writeString((*_iter1207));
+xfer += oprot->writeString((*_iter1226));
   }
   xfer += oprot->writeListEnd();
 }
@@ -2428,14 +2428,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 (*(this->success)).clear();
-uint32_t _size1208;
-::apache::thrift::protocol::TType _etype1211;
-xfer += iprot->readListBegin(_etype1211, _size1208);
-(*(this->success)).resize(_size1208);
-uint32_t _i1212;
-for (_i1212 = 0; _i1212 < _size1208; ++_i1212)
+uint32_t _size1227;
+::apache::thrift::protocol::TType _etype1230;
+xfer += iprot->readListBegin(_etype1230, _size1227);
+(*(this->success)).resize(_size1227);
+uint32_t _i1231;
+for (_i1231 = 0; _i1231 < _size1227; ++_i1231)
 {
-  xfer += iprot->readString((*(this->success))[_i1212]);
+  xfer += iprot->readString((*(this->success))[_i1231]);
 }
 xfer += iprot->readListEnd();
   }
@@ -2552,14 +2552,14 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size1213;
-::apache::thrift::protocol::TType _etype1216;
-xfer += iprot->readListBegin(_etype1216, _size1213);
-this->success.resize(_size1213);
-uint32_t _i1217;
-for (_i1217 = 0; _i1217 < _size1213; ++_i1217)
+uint32_t _size1232;
+::apache::thrift::protocol::TType _etype1235;
+xfer += iprot->readListBegin(_etype1235, _size1232);
+this->success.resize(_size1232);
+uint32_t _i1236;
+for (_i1236 = 0; _i1236 < _size1232; ++_i1236)
 {
-  xfer += iprot->readString(this->success[_i1217]);
+  xfer += iprot->readString(this->success[_i1236]);
 }
 xfer += iprot->readListEnd();
   }
@@ -2598,10 +2598,10 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter1218;
-  for (_iter1218 = this->success.begin(); _iter1218 != 
this->success.end(); ++_iter1218)
+  std::vector<std::string> ::const_iterator _iter1237;
+  for (_iter1237 = this->success.begin(); _iter1237 != 
this->success.end(); ++_iter1237)
   {
-xfer += 

[47/67] [abbrv] hive git commit: HIVE-19787: Log message when spark-submit has completed (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar)

2018-06-18 Thread sershe
HIVE-19787: Log message when spark-submit has completed (Bharathkrishna 
Guruvayoor Murali, reviewed by Sahil Takiar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8c076762
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8c076762
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8c076762

Branch: refs/heads/master-txnstats
Commit: 8c0767625069418871194f418b99bce8cca1007b
Parents: c89cf6d
Author: Bharathkrishna Guruvayoor Murali 
Authored: Mon Jun 18 10:12:10 2018 -0500
Committer: Sahil Takiar 
Committed: Mon Jun 18 10:12:10 2018 -0500

--
 .../java/org/apache/hive/spark/client/SparkSubmitSparkClient.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8c076762/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
--
diff --git 
a/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
 
b/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
index 1a524b9..31e89b8 100644
--- 
a/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
+++ 
b/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
@@ -211,6 +211,8 @@ class SparkSubmitSparkClient extends AbstractSparkClient {
   LOG.warn("Child process exited with code {}", exitCode);
   rpcServer.cancelClient(clientId,
   "Child process (spark-submit) exited before connecting back with 
error log " + errStr.toString());
+} else {
+  LOG.info("Child process (spark-submit) exited successfully.");
 }
   } catch (InterruptedException ie) {
 LOG.warn("Thread waiting on the child process (spark-submit) is 
interrupted, killing the child process.");

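The patch adds the missing success-path log so that both outcomes of waiting on the spark-submit child process are visible. A small stand-alone sketch of that wait-and-log pattern (the launched command and the logger are placeholders, not the SparkSubmitSparkClient internals):

    import java.util.logging.Logger;

    public class ChildProcessExitLogging {
      private static final Logger LOG = Logger.getLogger(ChildProcessExitLogging.class.getName());

      public static void main(String[] args) throws Exception {
        // Stand-in for launching spark-submit.
        Process child = new ProcessBuilder("echo", "spark-submit stand-in").inheritIO().start();
        int exitCode = child.waitFor();
        if (exitCode != 0) {
          LOG.warning("Child process exited with code " + exitCode);
        } else {
          LOG.info("Child process (spark-submit) exited successfully.");
        }
      }
    }
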


[39/67] [abbrv] hive git commit: HIVE-19880: Repl Load to return recoverable vs non-recoverable error codes (Mahesh Kumar Behera, reviewed by Sankar Hariappan)

2018-06-18 Thread sershe
HIVE-19880: Repl Load to return recoverable vs non-recoverable error codes 
(Mahesh Kumar Behera, reviewed by Sankar Hariappan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f83d7654
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f83d7654
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f83d7654

Branch: refs/heads/master-txnstats
Commit: f83d7654ee8e6758c0026ed53a3a928914640e38
Parents: 24da460
Author: Sankar Hariappan 
Authored: Sun Jun 17 17:28:02 2018 -0700
Committer: Sankar Hariappan 
Committed: Sun Jun 17 17:28:02 2018 -0700

--
 .../hive/ql/parse/TestReplicationScenarios.java | 30 +++-
 .../org/apache/hive/jdbc/TestJdbcDriver2.java   | 22 ++
 .../org/apache/hadoop/hive/ql/ErrorMsg.java | 11 +++
 .../hadoop/hive/ql/exec/ReplCopyTask.java   |  3 +-
 .../hadoop/hive/ql/exec/repl/ReplDumpTask.java  |  3 +-
 .../ql/exec/repl/bootstrap/ReplLoadTask.java|  3 +-
 .../filesystem/DatabaseEventsIterator.java  |  4 +--
 .../ql/parse/ReplicationSemanticAnalyzer.java   | 22 +++---
 .../hadoop/hive/ql/parse/repl/CopyUtils.java| 16 ++-
 .../hive/ql/parse/repl/dump/TableExport.java|  2 +-
 .../ql/parse/repl/dump/io/FileOperations.java   |  5 ++--
 .../hive/metastore/HiveMetaStoreClient.java |  5 +++-
 .../hive/metastore/messaging/EventUtils.java|  4 +--
 13 files changed, 99 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 862140f..689c859 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -73,6 +73,7 @@ import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hive.ql.ErrorMsg;
 
 import javax.annotation.Nullable;
 
@@ -853,7 +854,8 @@ public class TestReplicationScenarios {
 
InjectableBehaviourObjectStore.setGetNextNotificationBehaviour(eventIdSkipper);
 
 advanceDumpDir();
-verifyFail("REPL DUMP " + dbName + " FROM " + replDumpId, driver);
+CommandProcessorResponse ret = driver.run("REPL DUMP " + dbName + " FROM " 
+ replDumpId);
+assertTrue(ret.getResponseCode() == 
ErrorMsg.REPL_EVENTS_MISSING_IN_METASTORE.getErrorCode());
 eventIdSkipper.assertInjectionsPerformed(true,false);
 InjectableBehaviourObjectStore.resetGetNextNotificationBehaviour(); // 
reset the behaviour
   }
@@ -3158,6 +3160,32 @@ public class TestReplicationScenarios {
   }
 
   @Test
+  public void testLoadCmPathMissing() throws IOException {
+String dbName = createDB(testName.getMethodName(), driver);
+run("CREATE TABLE " + dbName + ".normal(a int)", driver);
+run("INSERT INTO " + dbName + ".normal values (1)", driver);
+
+advanceDumpDir();
+run("repl dump " + dbName, true, driver);
+String dumpLocation = getResult(0, 0, driver);
+
+run("DROP TABLE " + dbName + ".normal", driver);
+
+String cmDir = hconf.getVar(HiveConf.ConfVars.REPLCMDIR);
+Path path = new Path(cmDir);
+FileSystem fs = path.getFileSystem(hconf);
+ContentSummary cs = fs.getContentSummary(path);
+long fileCount = cs.getFileCount();
+assertTrue(fileCount != 0);
+fs.delete(path);
+
+CommandProcessorResponse ret = driverMirror.run("REPL LOAD " + dbName + " 
FROM '" + dumpLocation + "'");
+assertTrue(ret.getResponseCode() == 
ErrorMsg.REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH.getErrorCode());
+run("drop database " + dbName, true, driver);
+fs.create(path, false);
+  }
+
+  @Test
   public void testDumpNonReplDatabase() throws IOException {
 String dbName = createDBNonRepl(testName.getMethodName(), driver);
 verifyFail("REPL DUMP " + dbName, driver);

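Both new tests follow the same pattern: run the REPL command through the driver and compare the numeric response code against a specific ErrorMsg constant, instead of only asserting a generic failure. A reduced sketch of that check (the numeric codes below are invented for illustration and are not the real ErrorMsg values):

    public class ReplErrorCodeCheck {
      // Illustrative stand-ins for ErrorMsg constants; the real codes differ.
      static final int REPL_EVENTS_MISSING_IN_METASTORE = 10001;
      static final int REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH = 10002;

      // A caller can branch on the specific code that came back rather than
      // treating every non-zero response the same way.
      static String describe(int responseCode) {
        if (responseCode == REPL_EVENTS_MISSING_IN_METASTORE) {
          return "events missing in metastore";
        }
        if (responseCode == REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH) {
          return "file missing from source and CM path";
        }
        return "other failure (" + responseCode + ")";
      }

      public static void main(String[] args) {
        System.out.println(describe(REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH));
      }
    }
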
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index d47c136..850b2d5 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -76,6 +76,7 @@ import 

[33/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
index c9078be..9302791 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hive.ql.io.orc.encoded;
 
+import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
+import org.apache.orc.OrcFile;
 import org.apache.orc.impl.RunLengthByteReader;
 
 import java.io.IOException;
@@ -1200,6 +1202,147 @@ public class EncodedTreeReaderFactory extends 
TreeReaderFactory {
 }
   }
 
+  protected static class Decimal64StreamReader extends Decimal64TreeReader 
implements SettableTreeReader {
+private boolean _isFileCompressed;
+private SettableUncompressedStream _presentStream;
+private SettableUncompressedStream _valueStream;
+private List<ColumnVector> vectors;
+private int vectorIndex = 0;
+
+private Decimal64StreamReader(int columnId, int precision, int scale,
+  SettableUncompressedStream presentStream,
+  SettableUncompressedStream valueStream,
+  boolean isFileCompressed,
+  OrcProto.ColumnEncoding encoding, TreeReaderFactory.Context context,
+  List<ColumnVector> vectors) throws IOException {
+  super(columnId, presentStream, valueStream, encoding,
+precision, scale, context);
+  this._isFileCompressed = isFileCompressed;
+  this._presentStream = presentStream;
+  this._valueStream = valueStream;
+  this.vectors = vectors;
+}
+
+@Override
+public void seek(PositionProvider index) throws IOException {
+  if (vectors != null) return;
+  if (present != null) {
+if (_isFileCompressed) {
+  index.getNext();
+}
+present.seek(index);
+  }
+
+  // data stream could be empty stream or already reached end of stream 
before present stream.
+  // This can happen if all values in stream are nulls or last row group 
values are all null.
+  skipCompressedIndex(_isFileCompressed, index);
+  if (_valueStream.available() > 0) {
+valueReader.seek(index);
+  } else {
+skipSeek(index);
+  }
+}
+
+@Override
+public void nextVector(
+  ColumnVector previousVector, boolean[] isNull, int batchSize) throws 
IOException {
+  if (vectors == null) {
+super.nextVector(previousVector, isNull, batchSize);
+return;
+  }
+  vectors.get(vectorIndex++).shallowCopyTo(previousVector);
+  if (vectorIndex == vectors.size()) {
+vectors = null;
+  }
+}
+
+@Override
+public void setBuffers(EncodedColumnBatch<OrcBatchKey> batch, boolean 
sameStripe) {
+  assert vectors == null; // See the comment in 
TimestampStreamReader.setBuffers.
+  ColumnStreamData[] streamsData = batch.getColumnData(columnId);
+  if (_presentStream != null) {
+
_presentStream.setBuffers(StreamUtils.createDiskRangeInfo(streamsData[OrcProto.Stream.Kind.PRESENT_VALUE]));
+  }
+  if (_valueStream != null) {
+
_valueStream.setBuffers(StreamUtils.createDiskRangeInfo(streamsData[OrcProto.Stream.Kind.DATA_VALUE]));
+  }
+}
+
+public static class StreamReaderBuilder {
+  private int columnIndex;
+  private ColumnStreamData presentStream;
+  private ColumnStreamData valueStream;
+  private int scale;
+  private int precision;
+  private CompressionCodec compressionCodec;
+  private OrcProto.ColumnEncoding columnEncoding;
+  private List<ColumnVector> vectors;
+  private TreeReaderFactory.Context context;
+
+  public StreamReaderBuilder setColumnIndex(int columnIndex) {
+this.columnIndex = columnIndex;
+return this;
+  }
+
+  public StreamReaderBuilder setPrecision(int precision) {
+this.precision = precision;
+return this;
+  }
+
+  public StreamReaderBuilder setScale(int scale) {
+this.scale = scale;
+return this;
+  }
+
+  public StreamReaderBuilder setContext(TreeReaderFactory.Context context) 
{
+this.context = context;
+return this;
+  }
+
+  public StreamReaderBuilder setPresentStream(ColumnStreamData 
presentStream) {
+this.presentStream = presentStream;
+return this;
+  }
+
+  public StreamReaderBuilder setValueStream(ColumnStreamData valueStream) {
+this.valueStream = valueStream;
+return this;
+  }
+
+
+  public StreamReaderBuilder setCompressionCodec(CompressionCodec 
compressionCodec) {
+this.compressionCodec = compressionCodec;
+return this;
+  }
+
+  public 

[40/67] [abbrv] hive git commit: HIVE-19903: Disable temporary insert-only transactional table (Steve Yeom, reviewed by Jason Dere)

2018-06-18 Thread sershe
HIVE-19903: Disable temporary insert-only transactional table (Steve Yeom, 
reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/766c3dc2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/766c3dc2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/766c3dc2

Branch: refs/heads/master-txnstats
Commit: 766c3dc21e189afbecace308cd24cd1c5bde09b2
Parents: f83d765
Author: Jason Dere 
Authored: Sun Jun 17 21:38:05 2018 -0700
Committer: Jason Dere 
Committed: Sun Jun 17 21:38:05 2018 -0700

--
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  2 +-
 .../test/queries/clientpositive/mm_iow_temp.q   | 15 +
 .../results/clientpositive/mm_iow_temp.q.out| 61 
 3 files changed, 77 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/766c3dc2/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 284fcac..c2bcedd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -12847,7 +12847,7 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
 }
   }
 }
-boolean makeInsertOnly = HiveConf.getBoolVar(conf, 
ConfVars.HIVE_CREATE_TABLES_AS_INSERT_ONLY);
+boolean makeInsertOnly = !isTemporaryTable && HiveConf.getBoolVar(conf, 
ConfVars.HIVE_CREATE_TABLES_AS_INSERT_ONLY);
 boolean makeAcid = !isTemporaryTable &&
 MetastoreConf.getBoolVar(conf, 
MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID) &&
 HiveConf.getBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY) &&

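The guard mirrors the existing makeAcid condition: a temporary table is now excluded from the create-as-insert-only default, so hive.create.as.insert.only=true no longer turns a temporary table into an insert-only (MM) transactional table. A boiled-down sketch of the decision (configuration is reduced to plain booleans for illustration):

    public class InsertOnlyDecision {
      // Temporary tables are excluded, in the same way the surrounding code
      // already excludes them from full ACID (makeAcid).
      static boolean makeInsertOnly(boolean isTemporaryTable, boolean createTablesAsInsertOnly) {
        return !isTemporaryTable && createTablesAsInsertOnly;
      }

      public static void main(String[] args) {
        System.out.println(makeInsertOnly(true, true));  // false: temp table stays non-transactional
        System.out.println(makeInsertOnly(false, true)); // true
      }
    }
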
http://git-wip-us.apache.org/repos/asf/hive/blob/766c3dc2/ql/src/test/queries/clientpositive/mm_iow_temp.q
--
diff --git a/ql/src/test/queries/clientpositive/mm_iow_temp.q 
b/ql/src/test/queries/clientpositive/mm_iow_temp.q
new file mode 100644
index 000..d6942e4
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/mm_iow_temp.q
@@ -0,0 +1,15 @@
+--! qt:dataset:src
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.create.as.insert.only=true;
+
+create temporary table temptable1 (
+  key string,
+  value string
+);
+
+insert overwrite table temptable1 select * from src; 
+
+show create table temptable1;
+select * from temptable1 order by key limit 10;

http://git-wip-us.apache.org/repos/asf/hive/blob/766c3dc2/ql/src/test/results/clientpositive/mm_iow_temp.q.out
--
diff --git a/ql/src/test/results/clientpositive/mm_iow_temp.q.out 
b/ql/src/test/results/clientpositive/mm_iow_temp.q.out
new file mode 100644
index 000..719a48a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/mm_iow_temp.q.out
@@ -0,0 +1,61 @@
+PREHOOK: query: create temporary table temptable1 (
+  key string,
+  value string
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@temptable1
+POSTHOOK: query: create temporary table temptable1 (
+  key string,
+  value string
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@temptable1
+PREHOOK: query: insert overwrite table temptable1 select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@temptable1
+POSTHOOK: query: insert overwrite table temptable1 select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@temptable1
+POSTHOOK: Lineage: temptable1.key SIMPLE [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
+POSTHOOK: Lineage: temptable1.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
+PREHOOK: query: show create table temptable1
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@temptable1
+POSTHOOK: query: show create table temptable1
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@temptable1
+CREATE TEMPORARY TABLE `temptable1`(
+  `key` string, 
+  `value` string)
+ROW FORMAT SERDE 
+  'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
+STORED AS INPUTFORMAT 
+  'org.apache.hadoop.mapred.TextInputFormat' 
+OUTPUTFORMAT 
+  'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+LOCATION
+ A masked pattern was here 
+TBLPROPERTIES (
+  'bucketing_version'='2')
+PREHOOK: query: select * from temptable1 order by key limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@temptable1
+ A masked pattern was here 
+POSTHOOK: query: 

[31/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/llap_text.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/llap_text.q.out 
b/ql/src/test/results/clientpositive/llap/llap_text.q.out
new file mode 100644
index 000..40d08d3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/llap_text.q.out
@@ -0,0 +1,1082 @@
+PREHOOK: query: DROP TABLE text_llap
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE text_llap
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE text_llap(
+ctinyint TINYINT,
+csmallint SMALLINT,
+cint INT,
+cbigint BIGINT,
+cfloat FLOAT,
+cdouble DOUBLE,
+cstring1 STRING,
+cstring2 STRING,
+ctimestamp1 TIMESTAMP,
+ctimestamp2 TIMESTAMP,
+cboolean1 BOOLEAN,
+cboolean2 BOOLEAN)
+row format serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+stored as inputformat "org.apache.hadoop.mapred.TextInputFormat" 
+
+ outputformat "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@text_llap
+POSTHOOK: query: CREATE TABLE text_llap(
+ctinyint TINYINT,
+csmallint SMALLINT,
+cint INT,
+cbigint BIGINT,
+cfloat FLOAT,
+cdouble DOUBLE,
+cstring1 STRING,
+cstring2 STRING,
+ctimestamp1 TIMESTAMP,
+ctimestamp2 TIMESTAMP,
+cboolean1 BOOLEAN,
+cboolean2 BOOLEAN)
+row format serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+stored as inputformat "org.apache.hadoop.mapred.TextInputFormat" 
+
+ outputformat "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@text_llap
+PREHOOK: query: insert into table text_llap
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, 
cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 from alltypesorc 
+where cboolean2 is not null or cstring1 is not null or ctinyint is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@text_llap
+POSTHOOK: query: insert into table text_llap
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, 
cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 from alltypesorc 
+where cboolean2 is not null or cstring1 is not null or ctinyint is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@text_llap
+POSTHOOK: Lineage: text_llap.cbigint SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), 
]
+POSTHOOK: Lineage: text_llap.cboolean1 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, 
comment:null), ]
+POSTHOOK: Lineage: text_llap.cboolean2 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, 
comment:null), ]
+POSTHOOK: Lineage: text_llap.cdouble SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), 
]
+POSTHOOK: Lineage: text_llap.cfloat SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: text_llap.cint SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: text_llap.csmallint SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, 
comment:null), ]
+POSTHOOK: Lineage: text_llap.cstring1 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, 
comment:null), ]
+POSTHOOK: Lineage: text_llap.cstring2 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, 
comment:null), ]
+POSTHOOK: Lineage: text_llap.ctimestamp1 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, 
comment:null), ]
+POSTHOOK: Lineage: text_llap.ctimestamp2 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, 
comment:null), ]
+POSTHOOK: Lineage: text_llap.ctinyint SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, 
comment:null), ]
+PREHOOK: query: create table text_llap2(
+  t tinyint,
+  si smallint,
+  i int,
+  b bigint,
+  f float,
+  d double,
+  bo boolean,
+  s string,
+  ts timestamp, 
+  `dec` decimal,  
+  bin binary)
+row format delimited fields terminated by '|'
+stored as inputformat "org.apache.hadoop.mapred.TextInputFormat" 
+
+outputformat "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@text_llap2
+POSTHOOK: query: create table text_llap2(
+  t tinyint,
+  si smallint,
+  i int,
+  b bigint,
+  f float,
+  d double,
+  bo boolean,
+  s string,
+  ts timestamp, 
+  `dec` decimal,  

[43/67] [abbrv] hive git commit: HIVE-19909: qtests: retire hadoop_major version specific tests; and logics (Zoltan Haindrich reviewed by Teddy Choi)

2018-06-18 Thread sershe
HIVE-19909: qtests: retire hadoop_major version specific tests; and logics 
(Zoltan Haindrich reviewed by Teddy Choi)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4ec256c2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4ec256c2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4ec256c2

Branch: refs/heads/master-txnstats
Commit: 4ec256c23d5986385f0ad4ff0ae43b72822b6756
Parents: ebd2c5f
Author: Zoltan Haindrich 
Authored: Mon Jun 18 10:35:12 2018 +0200
Committer: Zoltan Haindrich 
Committed: Mon Jun 18 10:35:12 2018 +0200

--
 .../src/test/queries/negative/cascade_dbdrop.q  |   1 -
 .../queries/negative/cascade_dbdrop_hadoop20.q  |  29 --
 .../control/AbstractCoreBlobstoreCliDriver.java |   7 -
 .../hive/cli/control/CoreAccumuloCliDriver.java |   5 -
 .../hadoop/hive/cli/control/CoreCliDriver.java  |   8 -
 .../hive/cli/control/CoreCompareCliDriver.java  |   7 +-
 .../hive/cli/control/CoreHBaseCliDriver.java|   5 -
 .../cli/control/CoreHBaseNegativeCliDriver.java |   5 -
 .../hive/cli/control/CoreNegativeCliDriver.java |   7 +-
 .../hive/cli/control/CorePerfCliDriver.java |  10 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java| 110 +--
 ql/src/test/queries/clientnegative/archive1.q   |   1 -
 ql/src/test/queries/clientnegative/archive2.q   |   1 -
 ql/src/test/queries/clientnegative/archive3.q   |   1 -
 ql/src/test/queries/clientnegative/archive4.q   |   1 -
 .../queries/clientnegative/archive_corrupt.q|   1 -
 .../queries/clientnegative/archive_insert1.q|   1 -
 .../queries/clientnegative/archive_insert2.q|   1 -
 .../queries/clientnegative/archive_insert3.q|   1 -
 .../queries/clientnegative/archive_insert4.q|   1 -
 .../queries/clientnegative/archive_multi1.q |   1 -
 .../queries/clientnegative/archive_multi2.q |   1 -
 .../queries/clientnegative/archive_multi3.q |   1 -
 .../queries/clientnegative/archive_multi4.q |   1 -
 .../queries/clientnegative/archive_multi5.q |   1 -
 .../queries/clientnegative/archive_multi6.q |   1 -
 .../queries/clientnegative/archive_multi7.q |   1 -
 .../queries/clientnegative/archive_partspec1.q  |   1 -
 .../queries/clientnegative/archive_partspec2.q  |   1 -
 .../queries/clientnegative/archive_partspec3.q  |   1 -
 .../queries/clientnegative/archive_partspec4.q  |   1 -
 .../queries/clientnegative/archive_partspec5.q  |   1 -
 ql/src/test/queries/clientnegative/autolocal1.q |  16 --
 .../clientnegative/mapreduce_stack_trace.q  |   1 -
 .../mapreduce_stack_trace_turnoff.q |   1 -
 .../alter_numbuckets_partitioned_table_h23.q|   1 -
 .../test/queries/clientpositive/archive_multi.q |   1 -
 .../test/queries/clientpositive/auto_join14.q   |   1 -
 .../clientpositive/auto_join14_hadoop20.q   |  20 --
 .../cbo_rp_udaf_percentile_approx_23.q  |   1 -
 ql/src/test/queries/clientpositive/combine2.q   |   1 -
 .../queries/clientpositive/combine2_hadoop20.q  |  50 
 ql/src/test/queries/clientpositive/ctas.q   |   1 -
 .../queries/clientpositive/groupby_sort_1.q | 283 --
 .../queries/clientpositive/groupby_sort_1_23.q  |   1 -
 .../clientpositive/groupby_sort_skew_1.q| 285 ---
 .../clientpositive/groupby_sort_skew_1_23.q |   1 -
 .../infer_bucket_sort_list_bucket.q |   1 -
 ql/src/test/queries/clientpositive/input12.q|   1 -
 .../queries/clientpositive/input12_hadoop20.q   |  24 --
 ql/src/test/queries/clientpositive/input39.q|   1 -
 .../queries/clientpositive/input39_hadoop20.q   |  31 --
 ql/src/test/queries/clientpositive/join14.q |   1 -
 .../queries/clientpositive/join14_hadoop20.q|  17 --
 .../test/queries/clientpositive/lb_fs_stats.q   |   1 -
 .../queries/clientpositive/list_bucket_dml_1.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_11.q |   1 -
 .../queries/clientpositive/list_bucket_dml_12.q |   1 -
 .../queries/clientpositive/list_bucket_dml_13.q |   1 -
 .../queries/clientpositive/list_bucket_dml_14.q |   1 -
 .../queries/clientpositive/list_bucket_dml_2.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_3.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_4.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_5.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_6.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_7.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_8.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_9.q  |   1 -
 .../list_bucket_query_multiskew_1.q |   1 -
 .../list_bucket_query_multiskew_2.q |   1 -
 .../list_bucket_query_multiskew_3.q |   1 -
 .../list_bucket_query_oneskew_1.q   |   1 -
 .../list_bucket_query_oneskew_2.q   |   1 -
 .../list_bucket_query_oneskew_3.q   |   1 -
 

[55/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
--
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php 
b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
index a29ebb7..9033e9a 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -203,6 +203,17 @@ final class SchemaVersionState {
   );
 }
 
+final class IsolationLevelCompliance {
+  const YES = 1;
+  const NO = 2;
+  const UNKNOWN = 3;
+  static public $__names = array(
+1 => 'YES',
+2 => 'NO',
+3 => 'UNKNOWN',
+  );
+}
+
 final class FunctionType {
   const JAVA = 1;
   static public $__names = array(
@@ -6517,6 +6528,18 @@ class Table {
* @var int
*/
   public $ownerType =   1;
+  /**
+   * @var int
+   */
+  public $txnId = -1;
+  /**
+   * @var string
+   */
+  public $validWriteIdList = null;
+  /**
+   * @var int
+   */
+  public $isStatsCompliant = null;
 
   public function __construct($vals=null) {
 if (!isset(self::$_TSPEC)) {
@@ -6609,6 +6632,18 @@ class Table {
   'var' => 'ownerType',
   'type' => TType::I32,
   ),
+19 => array(
+  'var' => 'txnId',
+  'type' => TType::I64,
+  ),
+20 => array(
+  'var' => 'validWriteIdList',
+  'type' => TType::STRING,
+  ),
+21 => array(
+  'var' => 'isStatsCompliant',
+  'type' => TType::I32,
+  ),
 );
 }
 if (is_array($vals)) {
@@ -,6 +6701,15 @@ class Table {
   if (isset($vals['ownerType'])) {
 $this->ownerType = $vals['ownerType'];
   }
+  if (isset($vals['txnId'])) {
+$this->txnId = $vals['txnId'];
+  }
+  if (isset($vals['validWriteIdList'])) {
+$this->validWriteIdList = $vals['validWriteIdList'];
+  }
+  if (isset($vals['isStatsCompliant'])) {
+$this->isStatsCompliant = $vals['isStatsCompliant'];
+  }
 }
   }
 
@@ -6841,6 +6885,27 @@ class Table {
 $xfer += $input->skip($ftype);
   }
   break;
+case 19:
+  if ($ftype == TType::I64) {
+$xfer += $input->readI64($this->txnId);
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
+case 20:
+  if ($ftype == TType::STRING) {
+$xfer += $input->readString($this->validWriteIdList);
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
+case 21:
+  if ($ftype == TType::I32) {
+$xfer += $input->readI32($this->isStatsCompliant);
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
 default:
   $xfer += $input->skip($ftype);
   break;
@@ -6978,6 +7043,21 @@ class Table {
   $xfer += $output->writeI32($this->ownerType);
   $xfer += $output->writeFieldEnd();
 }
+if ($this->txnId !== null) {
+  $xfer += $output->writeFieldBegin('txnId', TType::I64, 19);
+  $xfer += $output->writeI64($this->txnId);
+  $xfer += $output->writeFieldEnd();
+}
+if ($this->validWriteIdList !== null) {
+  $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 20);
+  $xfer += $output->writeString($this->validWriteIdList);
+  $xfer += $output->writeFieldEnd();
+}
+if ($this->isStatsCompliant !== null) {
+  $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 21);
+  $xfer += $output->writeI32($this->isStatsCompliant);
+  $xfer += $output->writeFieldEnd();
+}
 $xfer += $output->writeFieldStop();
 $xfer += $output->writeStructEnd();
 return $xfer;
@@ -7024,6 +7104,18 @@ class Partition {
* @var string
*/
   public $catName = null;
+  /**
+   * @var int
+   */
+  public $txnId = -1;
+  /**
+   * @var string
+   */
+  public $validWriteIdList = null;
+  /**
+   * @var int
+   */
+  public $isStatsCompliant = null;
 
   public function __construct($vals=null) {
 if (!isset(self::$_TSPEC)) {
@@ -7078,6 +7170,18 @@ class Partition {
   'var' => 'catName',
   'type' => TType::STRING,
   ),
+10 => array(
+  'var' => 'txnId',
+  'type' => TType::I64,
+  ),
+11 => array(
+  'var' => 'validWriteIdList',
+  'type' => TType::STRING,
+  ),
+12 => array(
+  'var' => 'isStatsCompliant',
+  'type' => TType::I32,
+  ),
 );
 }
 if (is_array($vals)) {
@@ -7108,6 +7212,15 @@ class Partition {
   if (isset($vals['catName'])) {
 $this->catName = $vals['catName'];
   }
+  if (isset($vals['txnId'])) {
+$this->txnId = $vals['txnId'];
+  }
+  if 

[56/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php 
b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index ec26cca..38895e3 100644
--- 
a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ 
b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -714,14 +714,12 @@ interface ThriftHiveMetastoreIf extends 
\FacebookServiceIf {
*/
   public function alter_partitions($db_name, $tbl_name, array $new_parts);
   /**
-   * @param string $db_name
-   * @param string $tbl_name
-   * @param \metastore\Partition[] $new_parts
-   * @param \metastore\EnvironmentContext $environment_context
+   * @param \metastore\AlterPartitionsRequest $req
+   * @return \metastore\AlterPartitionsResponse
* @throws \metastore\InvalidOperationException
* @throws \metastore\MetaException
*/
-  public function alter_partitions_with_environment_context($db_name, 
$tbl_name, array $new_parts, \metastore\EnvironmentContext 
$environment_context);
+  public function 
alter_partitions_with_environment_context(\metastore\AlterPartitionsRequest 
$req);
   /**
* @param string $db_name
* @param string $tbl_name
@@ -6394,19 +6392,16 @@ class ThriftHiveMetastoreClient extends 
\FacebookServiceClient implements \metas
 return;
   }
 
-  public function alter_partitions_with_environment_context($db_name, 
$tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context)
+  public function 
alter_partitions_with_environment_context(\metastore\AlterPartitionsRequest 
$req)
   {
-$this->send_alter_partitions_with_environment_context($db_name, $tbl_name, 
$new_parts, $environment_context);
-$this->recv_alter_partitions_with_environment_context();
+$this->send_alter_partitions_with_environment_context($req);
+return $this->recv_alter_partitions_with_environment_context();
   }
 
-  public function send_alter_partitions_with_environment_context($db_name, 
$tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context)
+  public function 
send_alter_partitions_with_environment_context(\metastore\AlterPartitionsRequest
 $req)
   {
 $args = new 
\metastore\ThriftHiveMetastore_alter_partitions_with_environment_context_args();
-$args->db_name = $db_name;
-$args->tbl_name = $tbl_name;
-$args->new_parts = $new_parts;
-$args->environment_context = $environment_context;
+$args->req = $req;
 $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
 if ($bin_accel)
 {
@@ -6442,13 +6437,16 @@ class ThriftHiveMetastoreClient extends 
\FacebookServiceClient implements \metas
   $result->read($this->input_);
   $this->input_->readMessageEnd();
 }
+if ($result->success !== null) {
+  return $result->success;
+}
 if ($result->o1 !== null) {
   throw $result->o1;
 }
 if ($result->o2 !== null) {
   throw $result->o2;
 }
-return;
+throw new \Exception("alter_partitions_with_environment_context failed: 
unknown result");
   }
 
   public function alter_partition_with_environment_context($db_name, 
$tbl_name, \metastore\Partition $new_part, \metastore\EnvironmentContext 
$environment_context)
@@ -15440,14 +15438,14 @@ class ThriftHiveMetastore_get_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size820 = 0;
-$_etype823 = 0;
-$xfer += $input->readListBegin($_etype823, $_size820);
-for ($_i824 = 0; $_i824 < $_size820; ++$_i824)
+$_size827 = 0;
+$_etype830 = 0;
+$xfer += $input->readListBegin($_etype830, $_size827);
+for ($_i831 = 0; $_i831 < $_size827; ++$_i831)
 {
-  $elem825 = null;
-  $xfer += $input->readString($elem825);
-  $this->success []= $elem825;
+  $elem832 = null;
+  $xfer += $input->readString($elem832);
+  $this->success []= $elem832;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -15483,9 +15481,9 @@ class ThriftHiveMetastore_get_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter826)
+  foreach ($this->success as $iter833)
   {
-$xfer += $output->writeString($iter826);
+$xfer += $output->writeString($iter833);
   }
 }
 $output->writeListEnd();
@@ -15616,14 +15614,14 @@ class ThriftHiveMetastore_get_all_databases_result {
 case 0:
   if ($ftype == 

[29/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge6.q.out 
b/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
index 3a25787..1359111 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@orc_merge5_n4
-POSTHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc_merge5_n4
-PREHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, 
hour int) stored as orc
+PREHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (year 
string, hour int) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@orc_merge5a_n1
-POSTHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, 
hour int) stored as orc
+POSTHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (year 
string, hour int) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc_merge5a_n1
@@ -50,7 +50,7 @@ STAGE PLANS:
 predicate: (userid <= 13L) (type: boolean)
 Statistics: Num rows: 1 Data size: 352 Basic stats: 
COMPLETE Column stats: NONE
 Select Operator
-  expressions: userid (type: bigint), string1 (type: 
string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: 
timestamp)
+  expressions: userid (type: bigint), string1 (type: 
string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: 
timestamp)
   outputColumnNames: _col0, _col1, _col2, _col3, _col4
   Statistics: Num rows: 1 Data size: 352 Basic stats: 
COMPLETE Column stats: NONE
   File Output Operator
@@ -62,7 +62,7 @@ STAGE PLANS:
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
 name: default.orc_merge5a_n1
   Select Operator
-expressions: _col0 (type: bigint), _col1 (type: 
string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: 
timestamp), '2000' (type: string), UDFToInteger('24') (type: int)
+expressions: _col0 (type: bigint), _col1 (type: 
string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: 
timestamp), '2000' (type: string), UDFToInteger('24') (type: int)
 outputColumnNames: userid, string1, subtype, decimal1, 
ts, year, hour
 Statistics: Num rows: 1 Data size: 352 Basic stats: 
COMPLETE Column stats: NONE
 Group By Operator
@@ -76,7 +76,7 @@ STAGE PLANS:
 sort order: ++
 Map-reduce partition columns: _col0 (type: 
string), _col1 (type: int)
 Statistics: Num rows: 1 Data size: 352 Basic 
stats: COMPLETE Column stats: NONE
-value expressions: _col2 (type: 
struct),
 _col3 (type: 
struct),
 _col4 (type: 
struct),
 _col5 (type: 
struct),
 _col6 (type: 
struct)
+value expressions: _col2 (type: 
struct),
 _col3 (type: 
struct),
 _col4 (type: 
struct),
 _col5 (type: 
struct),
 _col6 (type: 
struct)
 Execution mode: llap
 LLAP IO: all inputs
 Reducer 2 
@@ -89,7 +89,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
 Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE 
Column stats: NONE
 Select Operator
-  expressions: _col2 (type: 
struct),
 _col3 (type: 
struct),
 _col4 (type: 
struct),
 _col5 (type: 
struct),
 _col6 (type: 
struct),
 _col0 (type: 

[61/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
--
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h 
b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 5c6495e..68e34d5 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -185,6 +185,16 @@ struct SchemaVersionState {
 
 extern const std::map<int, const char*> _SchemaVersionState_VALUES_TO_NAMES;
 
+struct IsolationLevelCompliance {
+  enum type {
+YES = 1,
+NO = 2,
+UNKNOWN = 3
+  };
+};
+
+extern const std::map<int, const char*> _IsolationLevelCompliance_VALUES_TO_NAMES;
+
 struct FunctionType {
   enum type {
 JAVA = 1
@@ -667,6 +677,10 @@ class RuntimeStat;
 
 class GetRuntimeStatsRequest;
 
+class AlterPartitionsRequest;
+
+class AlterPartitionsResponse;
+
 class MetaException;
 
 class UnknownTableException;
@@ -3101,7 +3115,7 @@ inline std::ostream& operator<<(std::ostream& out, const 
StorageDescriptor& obj)
 }
 
 typedef struct _Table__isset {
-  _Table__isset() : tableName(false), dbName(false), owner(false), 
createTime(false), lastAccessTime(false), retention(false), sd(false), 
partitionKeys(false), parameters(false), viewOriginalText(false), 
viewExpandedText(false), tableType(false), privileges(false), temporary(true), 
rewriteEnabled(false), creationMetadata(false), catName(false), ownerType(true) 
{}
+  _Table__isset() : tableName(false), dbName(false), owner(false), 
createTime(false), lastAccessTime(false), retention(false), sd(false), 
partitionKeys(false), parameters(false), viewOriginalText(false), 
viewExpandedText(false), tableType(false), privileges(false), temporary(true), 
rewriteEnabled(false), creationMetadata(false), catName(false), 
ownerType(true), txnId(true), validWriteIdList(false), isStatsCompliant(false) 
{}
   bool tableName :1;
   bool dbName :1;
   bool owner :1;
@@ -3120,6 +3134,9 @@ typedef struct _Table__isset {
   bool creationMetadata :1;
   bool catName :1;
   bool ownerType :1;
+  bool txnId :1;
+  bool validWriteIdList :1;
+  bool isStatsCompliant :1;
 } _Table__isset;
 
 class Table {
@@ -3127,7 +3144,7 @@ class Table {
 
   Table(const Table&);
   Table& operator=(const Table&);
-  Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), 
retention(0), viewOriginalText(), viewExpandedText(), tableType(), 
temporary(false), rewriteEnabled(0), catName(), 
ownerType((PrincipalType::type)1) {
+  Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), 
retention(0), viewOriginalText(), viewExpandedText(), tableType(), 
temporary(false), rewriteEnabled(0), catName(), 
ownerType((PrincipalType::type)1), txnId(-1LL), validWriteIdList(), 
isStatsCompliant((IsolationLevelCompliance::type)0) {
 ownerType = (PrincipalType::type)1;
 
   }
@@ -3151,6 +3168,9 @@ class Table {
   CreationMetadata creationMetadata;
   std::string catName;
   PrincipalType::type ownerType;
+  int64_t txnId;
+  std::string validWriteIdList;
+  IsolationLevelCompliance::type isStatsCompliant;
 
   _Table__isset __isset;
 
@@ -3190,6 +3210,12 @@ class Table {
 
   void __set_ownerType(const PrincipalType::type val);
 
+  void __set_txnId(const int64_t val);
+
+  void __set_validWriteIdList(const std::string& val);
+
+  void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
   bool operator == (const Table & rhs) const
   {
 if (!(tableName == rhs.tableName))
@@ -3240,6 +3266,18 @@ class Table {
   return false;
 else if (__isset.ownerType && !(ownerType == rhs.ownerType))
   return false;
+if (__isset.txnId != rhs.__isset.txnId)
+  return false;
+else if (__isset.txnId && !(txnId == rhs.txnId))
+  return false;
+if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+  return false;
+else if (__isset.validWriteIdList && !(validWriteIdList == 
rhs.validWriteIdList))
+  return false;
+if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+  return false;
+else if (__isset.isStatsCompliant && !(isStatsCompliant == 
rhs.isStatsCompliant))
+  return false;
 return true;
   }
  bool operator != (const Table &rhs) const {
@@ -3263,7 +3301,7 @@ inline std::ostream& operator<<(std::ostream& out, const 
Table& obj)
 }
 
 typedef struct _Partition__isset {
-  _Partition__isset() : values(false), dbName(false), tableName(false), 
createTime(false), lastAccessTime(false), sd(false), parameters(false), 
privileges(false), catName(false) {}
+  _Partition__isset() : values(false), dbName(false), tableName(false), 
createTime(false), lastAccessTime(false), sd(false), parameters(false), 
privileges(false), catName(false), txnId(true), validWriteIdList(false), 
isStatsCompliant(false) {}
   bool values :1;
   bool dbName :1;
   bool tableName :1;

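The generated operator== above only treats the new optional Table fields (txnId, validWriteIdList, isStatsCompliant) as comparable when both sides agree the field is set: differing __isset bits make the objects unequal, and the values themselves are compared only when both are set. A compact Java analogue of that isset-aware comparison, for illustration only (the class and field names below are made up; the real Thrift bindings generate their own equals):

import java.util.Objects;

public final class IssetEqualitySketch {

  // Illustrative optional field; 'set' plays the role of the C++ __isset bit.
  static final class OptionalField<T> {
    final boolean set;
    final T value;
    OptionalField(boolean set, T value) { this.set = set; this.value = value; }

    // Mirrors the generated pattern: differing isset bits -> unequal,
    // both set -> compare values, both unset -> equal.
    boolean sameAs(OptionalField<T> other) {
      if (this.set != other.set) {
        return false;
      }
      return !this.set || Objects.equals(this.value, other.value);
    }
  }

  public static void main(String[] args) {
    OptionalField<Long> a = new OptionalField<>(true, -1L);   // e.g. txnId defaulting to -1
    OptionalField<Long> b = new OptionalField<>(true, -1L);
    OptionalField<Long> c = new OptionalField<>(false, null);
    System.out.println(a.sameAs(b));  // true
    System.out.println(a.sameAs(c));  // false: one side set, the other not
  }
}
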
[32/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/llap_acid.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/llap_acid.q.out 
b/ql/src/test/results/clientpositive/llap/llap_acid.q.out
index 6196efe..635f928 100644
--- a/ql/src/test/results/clientpositive/llap/llap_acid.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_acid.q.out
@@ -124,8 +124,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -269,8 +269,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -378,8 +378,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false

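The hunks above (and the related q.out changes throughout this patch) flip inputFormatFeatureSupport and featureSupportInUse from [] to [DECIMAL_64] for ORC inputs, and later plans in the series show Decimal64 expressions such as FilterDecimal64ColEqualDecimal64Scalar. The underlying idea is that decimals whose precision fits in 18 digits can be carried as scaled 64-bit longs instead of full decimal objects, so an equality predicate reduces to a long comparison. A self-contained Java sketch of that scaled-long encoding (illustrative only, not Hive's actual Decimal64ColumnVector code):

import java.math.BigDecimal;

public final class Decimal64Sketch {

  // Encode a decimal as value * 10^scale; valid while the result fits in 18 digits.
  static long toScaledLong(BigDecimal value, int scale) {
    return value.movePointRight(scale).longValueExact();
  }

  static BigDecimal fromScaledLong(long scaled, int scale) {
    return BigDecimal.valueOf(scaled, scale);
  }

  public static void main(String[] args) {
    int scale = 3;                                      // e.g. a decimal(10,3) column
    long column = toScaledLong(new BigDecimal("1.123"), scale);
    long literal = toScaledLong(new BigDecimal("1.123"), scale);
    // An equality predicate on the column becomes a plain long comparison.
    System.out.println(column == literal);              // true
    System.out.println(column);                         // 1123
    System.out.println(fromScaledLong(column, scale));  // 1.123
  }
}
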
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/llap_acid2.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/llap_acid2.q.out 
b/ql/src/test/results/clientpositive/llap/llap_acid2.q.out
index 4d74a17..c3e9c2a 100644
--- a/ql/src/test/results/clientpositive/llap/llap_acid2.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_acid2.q.out
@@ -16,8 +16,10 @@ PREHOOK: query: CREATE TABLE orc_llap_n2 (
 cfloat1 FLOAT,
 cdouble1 DOUBLE,
 cstring1 string,
-cfloat2 float
-)  stored as orc TBLPROPERTIES ('transactional'='true')
+cfloat2 float,
+cdecimal1 decimal(10,3),
+cdecimal2 decimal(38,10)
+)  stored as orc TBLPROPERTIES 
('transactional'='true','orc.write.format'='UNSTABLE-PRE-2.0')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@orc_llap_n2
@@ -35,8 +37,10 @@ POSTHOOK: query: CREATE TABLE orc_llap_n2 (
 cfloat1 FLOAT,
 cdouble1 DOUBLE,
 cstring1 string,
-cfloat2 float
-)  stored as orc TBLPROPERTIES ('transactional'='true')
+cfloat2 float,
+cdecimal1 decimal(10,3),
+cdecimal2 decimal(38,10)
+)  stored as orc TBLPROPERTIES 
('transactional'='true','orc.write.format'='UNSTABLE-PRE-2.0')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc_llap_n2
@@ -44,7 +48,8 @@ PREHOOK: query: insert into table orc_llap_n2
 select cint, cbigint, cfloat, cdouble,
  cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
  cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
- cstring1, cfloat as c9 from alltypesorc order by cdouble asc  limit 30
+ cstring1, cfloat as c9, cast("1.123" as decimal(10,3))as c10,
+ cast("1.123456789" as decimal(38,18)) as c11 from alltypesorc order by 
cdouble asc  limit 30
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@orc_llap_n2
@@ -52,13 +57,58 @@ POSTHOOK: query: insert into table orc_llap_n2
 select cint, cbigint, cfloat, cdouble,
  cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
  cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
- cstring1, cfloat as c9 from alltypesorc order by cdouble asc  limit 30
+ cstring1, cfloat as c9, cast("1.123" as decimal(10,3))as c10,
+ cast("1.123456789" as decimal(38,18)) as c11 from alltypesorc order by 
cdouble asc  limit 30
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@orc_llap_n2
 POSTHOOK: Lineage: orc_llap_n2.cbigint SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), 
]
 POSTHOOK: Lineage: orc_llap_n2.cbigint0 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), 
]
 POSTHOOK: Lineage: orc_llap_n2.cbigint1 SIMPLE 

[35/67] [abbrv] hive git commit: HIVE-19876: Multiple fixes for Driver.isValidTxnListState (Jesus Camacho Rodriguez, reviewed by Eugene Koifman)

2018-06-18 Thread sershe
HIVE-19876: Multiple fixes for Driver.isValidTxnListState (Jesus Camacho 
Rodriguez, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b1004830
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b1004830
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b1004830

Branch: refs/heads/master-txnstats
Commit: b1004830ec95a74112ce37308d251b0366030824
Parents: dd51259
Author: Jesus Camacho Rodriguez 
Authored: Thu Jun 14 20:52:33 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Sat Jun 16 16:46:48 2018 -0700

--
 .../java/org/apache/hadoop/hive/ql/Context.java |  1 +
 .../java/org/apache/hadoop/hive/ql/Driver.java  | 30 +++-
 2 files changed, 17 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b1004830/ql/src/java/org/apache/hadoop/hive/ql/Context.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java 
b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index e4e3d48..bb41e98 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -356,6 +356,7 @@ public class Context {
 this.executionIndex = ctx.executionIndex;
 this.viewsTokenRewriteStreams = new HashMap<>();
 this.rewrittenStatementContexts = new HashSet<>();
+this.opContext = new CompilationOpContext();
   }
 
   public Map getFsScratchDirs() {

http://git-wip-us.apache.org/repos/asf/hive/blob/b1004830/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java 
b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index abeb7fc..43a78ca 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -815,33 +815,35 @@ public class Driver implements IDriver {
 // The lock may have multiple components, e.g., DbHiveLock, hence we 
need
 // to check for each of them
 for (LockComponent lckCmp : lock.getHiveLockComponents()) {
-  if (lckCmp.getType() == LockType.EXCLUSIVE ||
-  lckCmp.getType() == LockType.SHARED_WRITE) {
+  // We only consider tables for which we hold either an exclusive
+  // or a shared write lock
+  if ((lckCmp.getType() == LockType.EXCLUSIVE ||
+  lckCmp.getType() == LockType.SHARED_WRITE) &&
+  lckCmp.getTablename() != null) {
 nonSharedLocks.add(
 Warehouse.getQualifiedName(
 lckCmp.getDbname(), lckCmp.getTablename()));
   }
 }
   } else {
-// The lock has a single components, e.g., SimpleHiveLock or 
ZooKeeperHiveLock
-if (lock.getHiveLockMode() == HiveLockMode.EXCLUSIVE ||
-lock.getHiveLockMode() == HiveLockMode.SEMI_SHARED) {
-  if (lock.getHiveLockObject().getPaths().length == 2) {
-// Pos 0 of lock paths array contains dbname, pos 1 contains 
tblname
-nonSharedLocks.add(
-Warehouse.getQualifiedName(
-lock.getHiveLockObject().getPaths()[0], 
lock.getHiveLockObject().getPaths()[1]));
-  }
+// The lock has a single components, e.g., SimpleHiveLock or 
ZooKeeperHiveLock.
+// Pos 0 of lock paths array contains dbname, pos 1 contains tblname
+if ((lock.getHiveLockMode() == HiveLockMode.EXCLUSIVE ||
+lock.getHiveLockMode() == HiveLockMode.SEMI_SHARED) &&
+lock.getHiveLockObject().getPaths().length == 2) {
+  nonSharedLocks.add(
+  Warehouse.getQualifiedName(
+  lock.getHiveLockObject().getPaths()[0], 
lock.getHiveLockObject().getPaths()[1]));
 }
   }
 }
 // 3) Get txn tables that are being written
-ValidTxnWriteIdList txnWriteIdList =
-new 
ValidTxnWriteIdList(conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY));
-if (txnWriteIdList == null) {
+String txnWriteIdListStr = 
conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
+if (txnWriteIdListStr == null || txnWriteIdListStr.length() == 0) {
   // Nothing to check
   return true;
 }
+ValidTxnWriteIdList txnWriteIdList = new 
ValidTxnWriteIdList(txnWriteIdListStr);
 List> writtenTables = getWrittenTableList(plan);
 ValidTxnWriteIdList currentTxnWriteIds =
 queryTxnMgr.getValidWriteIds(



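The Driver.java hunk above fixes a null check that could never fire: the old code constructed a ValidTxnWriteIdList directly from the configuration value and then tested the resulting object for null, whereas the patch inspects the raw string first and only parses it when a value is actually present. A minimal sketch of that guard pattern in plain Java (the helper below is hypothetical and stands in for the real ValidTxnWriteIdList parsing):

import java.util.Optional;

public final class WriteIdListGuard {

  // Hypothetical stand-in for handling the configured write-id list string:
  // a constructor never returns null, so the emptiness check must happen on the raw string.
  static Optional<String[]> parseWriteIdList(String raw) {
    if (raw == null || raw.length() == 0) {
      return Optional.empty();            // nothing to validate; the caller can return early
    }
    return Optional.of(raw.split(","));   // parse only when a value is actually present
  }

  public static void main(String[] args) {
    System.out.println(parseWriteIdList(null).isPresent());          // false
    System.out.println(parseWriteIdList("").isPresent());            // false
    System.out.println(parseWriteIdList("db.tbl:5:42").isPresent()); // true
  }
}
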
[66/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/test/results/clientpositive/stats_nonpart.q.out
--
diff --git a/ql/src/test/results/clientpositive/stats_nonpart.q.out 
b/ql/src/test/results/clientpositive/stats_nonpart.q.out
new file mode 100644
index 000..cded846
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_nonpart.q.out
@@ -0,0 +1,500 @@
+PREHOOK: query: drop table if exists mysource
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists mysource
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table mysource (p int,key int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mysource
+POSTHOOK: query: create table mysource (p int,key int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mysource
+PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+PREHOOK: query: insert into mysource values (100,30), (101,50), (102,60)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,30), (101,50), (102,60)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+PREHOOK: query: drop table if exists stats_nonpartitioned
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists stats_nonpartitioned
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_nonpartitioned(key int, value int) stored 
as orc tblproperties ("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: query: create table stats_nonpartitioned(key int, value int) stored 
as orc tblproperties ("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_nonpartitioned
+PREHOOK: query: explain select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: stats_nonpartitioned
+Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column 
stats: COMPLETE
+Select Operator
+  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column 
stats: COMPLETE
+  Group By Operator
+aggregations: count()
+mode: hash
+outputColumnNames: _col0
+Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL 
Column stats: COMPLETE
+Reduce Output Operator
+  sort order: 
+  Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL 
Column stats: COMPLETE
+  value expressions: _col0 (type: bigint)
+  Execution mode: vectorized
+  Reduce Operator Tree:
+Group By Operator
+  aggregations: count(VALUE._col0)
+  mode: mergepartial
+  outputColumnNames: _col0
+  Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column 
stats: COMPLETE
+  File Output Operator
+compressed: false
+Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column 
stats: COMPLETE
+table:
+input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+Fetch Operator
+  limit: -1
+  Processor Tree:
+ListSink
+
+PREHOOK: query: select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_nonpartitioned
+ A masked pattern was here 
+POSTHOOK: query: select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_nonpartitioned
+ A masked pattern was here 
+0
+PREHOOK: query: desc formatted stats_nonpartitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_nonpartitioned
+POSTHOOK: query: desc formatted stats_nonpartitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_nonpartitioned
+# col_name data_type   comment   

[11/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/orc_merge11.q.out
--
diff --git a/ql/src/test/results/clientpositive/orc_merge11.q.out 
b/ql/src/test/results/clientpositive/orc_merge11.q.out
index 1b2ddd3..8e7840c 100644
--- a/ql/src/test/results/clientpositive/orc_merge11.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge11.q.out
@@ -6,11 +6,11 @@ PREHOOK: query: DROP TABLE orc_split_elim_n0
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE orc_split_elim_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@orc_split_elim_n0
-POSTHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 
string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 
string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc_split_elim_n0
@@ -30,36 +30,36 @@ POSTHOOK: query: load data local inpath 
'../../data/files/orc_split_elim.orc' in
 POSTHOOK: type: LOAD
  A masked pattern was here 
 POSTHOOK: Output: default@orc_split_elim_n0
-PREHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) stored as orc 
tblproperties("orc.compress.size"="4096")
+PREHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc 
tblproperties("orc.compress.size"="4096")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 
string, subtype double, decimal1 decimal, ts timestamp) stored as orc 
tblproperties("orc.compress.size"="4096")
+POSTHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 
string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc 
tblproperties("orc.compress.size"="4096")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orcfile_merge1_n2
-PREHOOK: query: insert overwrite table orcfile_merge1_n2 select * from 
orc_split_elim_n0
+PREHOOK: query: insert overwrite table orcfile_merge1_n2 select * from 
orc_split_elim_n0 order by userid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@orc_split_elim_n0
 PREHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: query: insert overwrite table orcfile_merge1_n2 select * from 
orc_split_elim_n0
+POSTHOOK: query: insert overwrite table orcfile_merge1_n2 select * from 
orc_split_elim_n0 order by userid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orc_split_elim_n0
 POSTHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE 
[(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, 
type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE 
[(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, 
type:decimal(38,0), comment:null), ]
 POSTHOOK: Lineage: orcfile_merge1_n2.string1 SIMPLE 
[(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:string1, type:string, 
comment:null), ]
 POSTHOOK: Lineage: orcfile_merge1_n2.subtype SIMPLE 
[(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:subtype, type:double, 
comment:null), ]
 POSTHOOK: Lineage: orcfile_merge1_n2.ts SIMPLE 
[(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:ts, type:timestamp, 
comment:null), ]
 POSTHOOK: Lineage: orcfile_merge1_n2.userid SIMPLE 
[(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:userid, type:bigint, 
comment:null), ]
-PREHOOK: query: insert into table orcfile_merge1_n2 select * from 
orc_split_elim_n0
+PREHOOK: query: insert into table orcfile_merge1_n2 select * from 
orc_split_elim_n0 order by userid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@orc_split_elim_n0
 PREHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: query: insert into table orcfile_merge1_n2 select * from 
orc_split_elim_n0
+POSTHOOK: query: insert into table orcfile_merge1_n2 select * from 
orc_split_elim_n0 order by userid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orc_split_elim_n0
 POSTHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE 
[(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, 
type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE 
[(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, 

[03/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_outer_join3.q.out
--
diff --git a/ql/src/test/results/clientpositive/vector_outer_join3.q.out 
b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
index 74d774b..07a2c33 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
@@ -244,7 +244,7 @@ left outer join small_alltypesorc_a_n1 hd
   on hd.cstring1 = c.cstring1
 ) t1
 POSTHOOK: type: QUERY
-{"PLAN 
VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled
 IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT 
STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT 
STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias 
-> Map Local Tables:":{"$hdt$_1:cd":{"Fetch 
Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch 
Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator 
Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cint"],"database:":"default","Statistics:":"Num
 rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: 
NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select
 Operator":{"expressions:":"cint (type: 
int)","columnExprMap:":{"_col0":"cint"},"outputColumnNames:":["_col0"],"Statistics:":"Num
 rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: 
NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":
 "_col0 (type: int)","1":"_col0 (type: 
int)"},"OperatorId:":"HASHTABLESINK_26"}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cstring1"],"database:":"default","Statistics:":"Num
 rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: 
NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select
 Operator":{"expressions:":"cstring1 (type: 
string)","columnExprMap:":{"_col0":"cstring1"},"outputColumnNames:":["_col0"],"Statistics:":"Num
 rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: 
NONE","OperatorId:":"SEL_5","children":{"HashTable Sink 
Operator":{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: 
string)"},"OperatorId:":"HASHTABLESINK_24"},"Stage-3":{"Map 
Reduce":{"Map Operator 
Tree:":[{"TableScan":{"alias:":"c","columns:":["cint","cstring1"],"database:":"default","Statistics:":"Num
 rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: 
NONE","table:":"small_alltypesorc_a_n1","TableScan Vectorizat
 ion:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 
1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 
5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 
8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 
11:cboolean2:boolean, 
12:ROW__ID:struct]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select
 Operator":{"expressions:":"cint (type: int), cstring1 (type: 
string)","columnExprMap:":{"_col0":"cint","_col1":"cstring1"},"outputColumnNames:":["_col0","_col1"],"Select
 
Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2,
 6]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column 
stats: NONE","OperatorId:":"SEL_28","children":{"Map Join 
Operator":{"columnExprMap:":{"_col1":"0:_col1"},"condition map:":[{"":"Left 
Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: int)","1":"_col0 (type: 
int)"},"Map Join V
 ectorization:":{"bigTableKeyExpressions:":["col 
2:int"],"bigTableValueExpressions:":["col 
6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable
 IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One 
MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS 
true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS 
true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS 
false"]},"outputColumnNames:":["_col1"],"Statistics:":"Num rows: 22 Data size: 
4840 Basic stats: COMPLETE Column stats: 
NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition 
map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: 
string)","1":"_col0 (type: string)"},"Map Join 
Vectorization:":{"bigTableKeyExpressions:":["col 
0:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hash
 table IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One 
MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS 
true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS 
true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS 
false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic 

[06/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_case_when_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/vector_case_when_1.q.out 
b/ql/src/test/results/clientpositive/vector_case_when_1.q.out
index 66807ac..59d8133 100644
--- a/ql/src/test/results/clientpositive/vector_case_when_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_case_when_1.q.out
@@ -140,7 +140,6 @@ SELECT
IF(L_SUPPKEY > 1, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS 
Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 
365)) AS Field_12
 FROM lineitem_test
-ORDER BY Quantity
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
 SELECT
@@ -182,7 +181,6 @@ SELECT
IF(L_SUPPKEY > 1, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS 
Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 
365)) AS Field_12
 FROM lineitem_test
-ORDER BY Quantity
 POSTHOOK: type: QUERY
 Explain
 PLAN VECTORIZATION:
@@ -204,33 +202,19 @@ STAGE PLANS:
   expressions: l_quantity (type: int), CASE WHEN ((l_quantity = 
1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 
10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE ('Huge number') 
END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN 
((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN 
((l_quantity < 100)) THEN ('Many') ELSE (null) END (type: string), CASE WHEN 
((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN 
((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN (null) ELSE 
(null) END (type: string), if((l_shipmode = 'SHIP  '), date_add(l_shipdate, 
10), date_add(l_shipdate, 5)) (type: date), CASE WHEN ((l_returnflag = 'N')) 
THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0) END (type: double), 
CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) 
ELSE (0.0D) END (type: double), if((UDFToString(l_shipinstruct) = 'DELIVER
  IN PERSON'), null, l_tax) (type: decimal(10,2)), 
if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, null) (type: 
decimal(10,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, 
l_tax) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK 
RETURN'), l_tax, 0) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 
'DELIVER IN PERSON'), 0, l_tax) (type: decimal(10,2)), 
if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: 
decimal(10,2)), if((l_partkey > 30), CAST( l_receiptdate AS TIMESTAMP), CAST( 
l_commitdate AS TIMESTAMP)) (type: timestamp), if((l_suppkey > 1), 
datediff(l_receiptdate, l_commitdate), null) (type: int), if((l_suppkey > 
1), null, datediff(l_receiptdate, l_commitdate)) (type: int), 
if(((l_suppkey % 500) > 100), DATE'2009-01-01', DATE'2009-12-31') (type: date)
   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, 
_col16
   Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE 
Column stats: NONE
-  Reduce Output Operator
-key expressions: _col0 (type: int)
-sort order: +
+  File Output Operator
+compressed: false
 Statistics: Num rows: 101 Data size: 78500 Basic stats: 
COMPLETE Column stats: NONE
-value expressions: _col1 (type: string), _col2 (type: string), 
_col3 (type: string), _col4 (type: date), _col5 (type: double), _col6 (type: 
double), _col7 (type: decimal(10,2)), _col8 (type: decimal(10,2)), _col9 (type: 
decimal(12,2)), _col10 (type: decimal(12,2)), _col11 (type: decimal(10,2)), 
_col12 (type: decimal(10,2)), _col13 (type: timestamp), _col14 (type: int), 
_col15 (type: int), _col16 (type: date)
+table:
+input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   notVectorizedReason: SELECT operator: Unexpected hive type name void
   vectorized: false
-  Reduce Vectorization:
-  enabled: false
-  enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
-  enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS 
false
-  Reduce Operator Tree:
-Select Operator
-  expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: 
string), VALUE._col1 (type: 

[21/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out 
b/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
index 9e1c8d7..4c9b737 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
@@ -86,12 +86,12 @@ STAGE PLANS:
   Statistics: Num rows: 39 Data size: 4032 Basic stats: 
COMPLETE Column stats: COMPLETE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:key:decimal(14,5), 
1:value:int, 2:ROW__ID:struct]
+  vectorizationSchemaColumns: 
[0:key:decimal(14,5)/DECIMAL_64, 1:value:int, 
2:ROW__ID:struct]
   Filter Operator
 Filter Vectorization:
 className: VectorFilterOperator
 native: true
-predicateExpression: 
FilterDecimalColEqualDecimalScalar(col 0:decimal(14,5), val 10)
+predicateExpression: 
FilterDecimal64ColEqualDecimal64Scalar(col 0:decimal(14,5)/DECIMAL_64, val 
100)
 predicate: (key = 10) (type: boolean)
 Statistics: Num rows: 2 Data size: 224 Basic stats: 
COMPLETE Column stats: COMPLETE
 Select Operator
@@ -118,8 +118,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -127,7 +127,7 @@ STAGE PLANS:
 rowBatchContext:
 dataColumnCount: 2
 includeColumns: [0]
-dataColumns: key:decimal(14,5), value:int
+dataColumns: key:decimal(14,5)/DECIMAL_64, value:int
 partitionColumnCount: 0
 scratchColumnTypeNames: [double, double, double, double, 
double, double, double]
 
@@ -195,12 +195,12 @@ STAGE PLANS:
   Statistics: Num rows: 39 Data size: 4188 Basic stats: 
COMPLETE Column stats: COMPLETE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:key:decimal(14,5), 
1:value:int, 2:ROW__ID:struct]
+  vectorizationSchemaColumns: 
[0:key:decimal(14,5)/DECIMAL_64, 1:value:int, 
2:ROW__ID:struct]
   Filter Operator
 Filter Vectorization:
 className: VectorFilterOperator
 native: true
-predicateExpression: 
FilterDecimalColEqualDecimalScalar(col 0:decimal(14,5), val 10)
+predicateExpression: 
FilterDecimal64ColEqualDecimal64Scalar(col 0:decimal(14,5)/DECIMAL_64, val 
100)
 predicate: (key = 10) (type: boolean)
 Statistics: Num rows: 2 Data size: 232 Basic stats: 
COMPLETE Column stats: COMPLETE
 Select Operator
@@ -227,8 +227,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: true
@@ -236,7 +236,7 @@ STAGE PLANS:
 rowBatchContext:
 dataColumnCount: 2
 includeColumns: [0, 1]
-dataColumns: key:decimal(14,5), value:int
+dataColumns: key:decimal(14,5)/DECIMAL_64, value:int
 partitionColumnCount: 0
 scratchColumnTypeNames: [double, double, double, double, 
double, double, double, double]
 
@@ -310,12 +310,12 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE 
Column stats: NONE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:key:decimal(14,5), 
1:value:int, 2:ROW__ID:struct]
+  vectorizationSchemaColumns: 

[18/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_like_2.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_like_2.q.out 
b/ql/src/test/results/clientpositive/llap/vector_like_2.q.out
index 8e132a7..f3ec37a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_like_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_like_2.q.out
@@ -74,8 +74,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out 
b/ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out
index a35b816..abddf5a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out
@@ -179,8 +179,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -259,8 +258,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: true
 usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_map_order.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_map_order.q.out 
b/ql/src/test/results/clientpositive/llap/vector_map_order.q.out
index 02fc5a0..238555c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_map_order.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_map_order.q.out
@@ -75,8 +75,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out 
b/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
index 09a53d0..e0c7dfa 100644
--- a/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
@@ -67,8 +67,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -111,8 +110,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 

[15/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out 
b/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out
index 5466297..42e9694 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out
@@ -100,8 +100,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -226,7 +225,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 304 Basic stats: COMPLETE 
Column stats: NONE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 
2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 
8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 
11:ROW__ID:struct]
+  vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 
2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 
8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 
11:ROW__ID:struct]
   Reduce Output Operator
 key expressions: d (type: double), dec (type: decimal(4,2))
 sort order: ++
@@ -246,8 +245,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -255,7 +253,7 @@ STAGE PLANS:
 rowBatchContext:
 dataColumnCount: 11
 includeColumns: [5, 7, 9]
-dataColumns: t:tinyint, si:smallint, i:int, b:bigint, 
f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), 
bin:binary
+dataColumns: t:tinyint, si:smallint, i:int, b:bigint, 
f:float, d:double, bo:boolean, s:string, ts:timestamp, 
dec:decimal(4,2)/DECIMAL_64, bin:binary
 partitionColumnCount: 0
 scratchColumnTypeNames: []
 Reducer 2 
@@ -485,7 +483,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE 
Column stats: NONE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 
2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 
8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 
11:ROW__ID:struct]
+  vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 
2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 
8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 
11:ROW__ID:struct]
   Reduce Output Operator
 key expressions: bin (type: binary), d (type: double), i 
(type: int)
 sort order: ++-
@@ -505,8 +503,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -514,7 +511,7 @@ STAGE PLANS:
 rowBatchContext:
 dataColumnCount: 11
 includeColumns: [2, 5, 7, 10]
-dataColumns: t:tinyint, si:smallint, i:int, b:bigint, 
f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), 
bin:binary
+dataColumns: t:tinyint, si:smallint, i:int, b:bigint, 
f:float, d:double, bo:boolean, s:string, ts:timestamp, 
dec:decimal(4,2)/DECIMAL_64, bin:binary
 

[24/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out 
b/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out
index 8e02351..8f0cc4d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out
@@ -50,22 +50,22 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE 
Column stats: COMPLETE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:t:decimal(18,9), 
1:ROW__ID:struct]
+  vectorizationSchemaColumns: 
[0:t:decimal(18,9)/DECIMAL_64, 
1:ROW__ID:struct]
   Select Operator
 expressions: UDFToBoolean(t) (type: boolean)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
 native: true
-projectedOutputColumnNums: [2]
-selectExpressions: CastDecimalToBoolean(col 
0:decimal(18,9)) -> 2:boolean
+projectedOutputColumnNums: [3]
+selectExpressions: CastDecimalToBoolean(col 
2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 
0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:boolean
 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE 
Column stats: COMPLETE
 Reduce Output Operator
   key expressions: _col0 (type: boolean)
   sort order: +
   Reduce Sink Vectorization:
   className: VectorReduceSinkObjectHashOperator
-  keyColumnNums: [2]
+  keyColumnNums: [3]
   native: true
   nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, 
BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
   valueColumnNums: []
@@ -75,8 +75,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -84,9 +84,9 @@ STAGE PLANS:
 rowBatchContext:
 dataColumnCount: 1
 includeColumns: [0]
-dataColumns: t:decimal(18,9)
+dataColumns: t:decimal(18,9)/DECIMAL_64
 partitionColumnCount: 0
-scratchColumnTypeNames: [bigint]
+scratchColumnTypeNames: [decimal(18,9), bigint]
 Reducer 2 
 Execution mode: vectorized, llap
 Reduce Vectorization:
@@ -166,22 +166,22 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE 
Column stats: COMPLETE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:t:decimal(18,9), 
1:ROW__ID:struct]
+  vectorizationSchemaColumns: 
[0:t:decimal(18,9)/DECIMAL_64, 
1:ROW__ID:struct]
   Select Operator
 expressions: UDFToByte(t) (type: tinyint)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
 native: true
-projectedOutputColumnNums: [2]
-selectExpressions: CastDecimalToLong(col 
0:decimal(18,9)) -> 2:tinyint
+projectedOutputColumnNums: [3]
+selectExpressions: CastDecimalToLong(col 
2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 
0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:tinyint
 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE 
Column stats: COMPLETE
 Reduce Output Operator
   key expressions: _col0 (type: tinyint)
   sort order: +
   Reduce Sink Vectorization:
   className: VectorReduceSinkObjectHashOperator
-  keyColumnNums: 

[04/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_round.q.out
--
diff --git a/ql/src/test/results/clientpositive/vector_decimal_round.q.out 
b/ql/src/test/results/clientpositive/vector_decimal_round.q.out
index cdf0ba4..d690579 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_round.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_round.q.out
@@ -473,15 +473,15 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE 
Column stats: NONE
 TableScan Vectorization:
 native: true
-vectorizationSchemaColumns: [0:dec:decimal(10,0), 
1:ROW__ID:struct]
+vectorizationSchemaColumns: [0:dec:decimal(10,0)/DECIMAL_64, 
1:ROW__ID:struct]
 Select Operator
   expressions: dec (type: decimal(10,0)), round(dec, -1) (type: 
decimal(11,0))
   outputColumnNames: _col0, _col1
   Select Vectorization:
   className: VectorSelectOperator
   native: true
-  projectedOutputColumnNums: [0, 2]
-  selectExpressions: 
FuncRoundWithNumDigitsDecimalToDecimal(col 0:decimal(10,0), decimalPlaces -1) 
-> 2:decimal(11,0)
+  projectedOutputColumnNums: [0, 3]
+  selectExpressions: 
FuncRoundWithNumDigitsDecimalToDecimal(col 2:decimal(10,0), decimalPlaces 
-1)(children: ConvertDecimal64ToDecimal(col 0:decimal(10,0)/DECIMAL_64) -> 
2:decimal(10,0)) -> 3:decimal(11,0)
   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE 
Column stats: NONE
   Reduce Output Operator
 key expressions: _col0 (type: decimal(10,0))
@@ -497,8 +497,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false
@@ -506,9 +506,9 @@ STAGE PLANS:
   rowBatchContext:
   dataColumnCount: 1
   includeColumns: [0]
-  dataColumns: dec:decimal(10,0)
+  dataColumns: dec:decimal(10,0)/DECIMAL_64
   partitionColumnCount: 0
-  scratchColumnTypeNames: [decimal(11,0)]
+  scratchColumnTypeNames: [decimal(10,0), decimal(11,0)]
   Reduce Vectorization:
   enabled: false
   enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -564,15 +564,15 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE 
Column stats: NONE
 TableScan Vectorization:
 native: true
-vectorizationSchemaColumns: [0:dec:decimal(10,0), 
1:ROW__ID:struct]
+vectorizationSchemaColumns: [0:dec:decimal(10,0)/DECIMAL_64, 
1:ROW__ID:struct]
 Select Operator
   expressions: dec (type: decimal(10,0)), round(dec, -1) (type: 
decimal(11,0))
   outputColumnNames: _col0, _col2
   Select Vectorization:
   className: VectorSelectOperator
   native: true
-  projectedOutputColumnNums: [0, 2]
-  selectExpressions: 
FuncRoundWithNumDigitsDecimalToDecimal(col 0:decimal(10,0), decimalPlaces -1) 
-> 2:decimal(11,0)
+  projectedOutputColumnNums: [0, 3]
+  selectExpressions: 
FuncRoundWithNumDigitsDecimalToDecimal(col 2:decimal(10,0), decimalPlaces 
-1)(children: ConvertDecimal64ToDecimal(col 0:decimal(10,0)/DECIMAL_64) -> 
2:decimal(10,0)) -> 3:decimal(11,0)
   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE 
Column stats: NONE
   Reduce Output Operator
 key expressions: _col2 (type: decimal(11,0))
@@ -588,8 +588,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false
@@ -597,9 +597,9 @@ STAGE PLANS:
   rowBatchContext:
   dataColumnCount: 1
   includeColumns: [0]
-  dataColumns: dec:decimal(10,0)
+  dataColumns: dec:decimal(10,0)/DECIMAL_64
   partitionColumnCount: 0
-  scratchColumnTypeNames: [decimal(11,0)]
+ 

[09/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out 
b/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
index e81ae06..617b873 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@orc_merge5
-POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc_merge5
-PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) 
stored as orc
+PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st 
double) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@orc_merge5a
-POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) 
stored as orc
+POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st 
double) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc_merge5a
@@ -42,7 +42,7 @@ STAGE PLANS:
   alias: orc_merge5
   Statistics: Num rows: 1 Data size: 2464020 Basic stats: 
COMPLETE Column stats: NONE
   Select Operator
-expressions: userid (type: bigint), string1 (type: 
string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: 
timestamp), subtype (type: double)
+expressions: userid (type: bigint), string1 (type: 
string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: 
timestamp), subtype (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
 Statistics: Num rows: 1 Data size: 2464020 Basic stats: 
COMPLETE Column stats: NONE
 File Output Operator
@@ -81,22 +81,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
 POSTHOOK: Output: default@orc_merge5a@st=1.8
 POSTHOOK: Output: default@orc_merge5a@st=8.0
 POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), 
comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), 
comment:null), ]
 POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
 POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), 
comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), 
comment:null), ]
 POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
 POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a 

[16/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_udf1.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_udf1.q.out 
b/ql/src/test/results/clientpositive/llap/vector_udf1.q.out
index 9e6e8e5..aef23fd 100644
--- a/ql/src/test/results/clientpositive/llap/vector_udf1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_udf1.q.out
@@ -95,8 +95,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -196,8 +196,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -297,8 +297,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -398,8 +398,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: true
@@ -499,8 +499,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: true
@@ -600,8 +600,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: true
@@ -701,8 +701,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: true
@@ -802,8 +802,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: true
@@ -903,8 +903,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 

[05/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_case_when_2.q.out
--
diff --git a/ql/src/test/results/clientpositive/vector_case_when_2.q.out 
b/ql/src/test/results/clientpositive/vector_case_when_2.q.out
index b8a5214..76c7f3d 100644
--- a/ql/src/test/results/clientpositive/vector_case_when_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_case_when_2.q.out
@@ -392,8 +392,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: true
@@ -651,8 +651,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: true

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_cast_constant.q.out
--
diff --git a/ql/src/test/results/clientpositive/vector_cast_constant.q.out 
b/ql/src/test/results/clientpositive/vector_cast_constant.q.out
index d8a534f..8c596a6 100644
--- a/ql/src/test/results/clientpositive/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/vector_cast_constant.q.out
@@ -165,8 +165,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_char_2.q.out
--
diff --git a/ql/src/test/results/clientpositive/vector_char_2.q.out 
b/ql/src/test/results/clientpositive/vector_char_2.q.out
index 97038ee..dc2c1e4 100644
--- a/ql/src/test/results/clientpositive/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_2.q.out
@@ -126,8 +126,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false
@@ -314,8 +314,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_char_4.q.out
--
diff --git a/ql/src/test/results/clientpositive/vector_char_4.q.out 
b/ql/src/test/results/clientpositive/vector_char_4.q.out
index 5b9f272..8d27537 100644
--- a/ql/src/test/results/clientpositive/vector_char_4.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_4.q.out
@@ -174,8 +174,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out

[20/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out 
b/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
index dcedca8..815b2a3 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
@@ -112,8 +112,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -241,8 +241,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -378,8 +378,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -527,8 +527,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -687,8 +687,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -829,8 +829,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -953,8 +953,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -1083,8 +1083,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false


[07/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out 
b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
index 95ebf46..1827f67 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
@@ -283,8 +283,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -491,8 +491,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: true
@@ -683,8 +683,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: true
@@ -875,8 +875,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: true
@@ -1016,8 +1016,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -1141,8 +1141,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -1284,8 +1284,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out 
b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
index 3c9cf03..d10faeb 100644
--- 
a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
+++ 
b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
@@ -370,10 +370,10 @@ POSTHOOK: 

[26/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_case_when_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_case_when_1.q.out 
b/ql/src/test/results/clientpositive/llap/vector_case_when_1.q.out
index 2581311..ab083e7 100644
--- a/ql/src/test/results/clientpositive/llap/vector_case_when_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_case_when_1.q.out
@@ -140,7 +140,6 @@ SELECT
IF(L_SUPPKEY > 1, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS 
Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 
365)) AS Field_12
 FROM lineitem_test
-ORDER BY Quantity
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
 SELECT
@@ -182,7 +181,6 @@ SELECT
IF(L_SUPPKEY > 1, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS 
Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 
365)) AS Field_12
 FROM lineitem_test
-ORDER BY Quantity
 POSTHOOK: type: QUERY
 Explain
 PLAN VECTORIZATION:
@@ -197,9 +195,6 @@ STAGE PLANS:
   Stage: Stage-1
 Tez
  A masked pattern was here 
-  Edges:
-Reducer 2 <- Map 1 (SIMPLE_EDGE)
- A masked pattern was here 
   Vertices:
 Map 1 
 Map Operator Tree:
@@ -210,11 +205,13 @@ STAGE PLANS:
 expressions: l_quantity (type: int), CASE WHEN 
((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN 
((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE 
('Huge number') END (type: string), CASE WHEN ((l_quantity = 1)) THEN 
('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN 
('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE (null) END (type: 
string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) 
THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) 
THEN (null) ELSE (null) END (type: string), if((l_shipmode = 'SHIP  '), 
date_add(l_shipdate, 10), date_add(l_shipdate, 5)) (type: date), CASE WHEN 
((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0) 
END (type: double), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * 
(1.0D - l_discount))) ELSE (0.0D) END (type: double), 
if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), null, l_tax) (type: decimal(10,2)), 
if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, null) (type: 
decimal(10,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, 
l_tax) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK 
RETURN'), l_tax, 0) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 
'DELIVER IN PERSON'), 0, l_tax) (type: decimal(10,2)), 
if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: 
decimal(10,2)), if((l_partkey > 30), CAST( l_receiptdate AS TIMESTAMP), CAST( 
l_commitdate AS TIMESTAMP)) (type: timestamp), if((l_suppkey > 1), 
datediff(l_receiptdate, l_commitdate), null) (type: int), if((l_suppkey > 
1), null, datediff(l_receiptdate, l_commitdate)) (type: int), 
if(((l_suppkey % 500) > 100), DATE'2009-01-01', DATE'2009-12-31') (type: date)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, 
_col15, _col16
 Statistics: Num rows: 101 Data size: 57327 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: int)
-  sort order: +
+File Output Operator
+  compressed: false
   Statistics: Num rows: 101 Data size: 57327 Basic stats: 
COMPLETE Column stats: NONE
-  value expressions: _col1 (type: string), _col2 (type: 
string), _col3 (type: string), _col4 (type: date), _col5 (type: double), _col6 
(type: double), _col7 (type: decimal(10,2)), _col8 (type: decimal(10,2)), _col9 
(type: decimal(12,2)), _col10 (type: decimal(12,2)), _col11 (type: 
decimal(10,2)), _col12 (type: decimal(10,2)), _col13 (type: timestamp), _col14 
(type: int), _col15 (type: int), _col16 (type: date)
+  table:
+  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+  serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 Execution mode: llap
 LLAP IO: all inputs
 Map Vectorization:
@@ -223,40 +220,6 @@ STAGE PLANS:
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 notVectorizedReason: SELECT operator: Unexpected hive type 
name void
 vectorized: false

[30/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/mergejoin.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/mergejoin.q.out 
b/ql/src/test/results/clientpositive/llap/mergejoin.q.out
index 63bf690..1e4f632 100644
--- a/ql/src/test/results/clientpositive/llap/mergejoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/mergejoin.q.out
@@ -64,8 +64,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -148,8 +147,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -1776,8 +1774,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -1821,8 +1819,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -1984,8 +1982,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -2068,8 +2066,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -2120,8 +2117,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -2382,8 +2379,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -2435,8 +2432,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true

[10/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
--
diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out 
b/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
index 0b76bfb..95fa5ca 100644
--- a/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@orc_merge5
-POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc_merge5
-PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) 
stored as orc
+PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st 
double) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@orc_merge5a
-POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) 
stored as orc
+POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, 
subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st 
double) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc_merge5a
@@ -39,7 +39,7 @@ STAGE PLANS:
 alias: orc_merge5
 Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE 
Column stats: NONE
 Select Operator
-  expressions: userid (type: bigint), string1 (type: string), 
subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), 
subtype (type: double)
+  expressions: userid (type: bigint), string1 (type: string), 
subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp), 
subtype (type: double)
   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
   Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE 
Column stats: NONE
   File Output Operator
@@ -51,7 +51,7 @@ STAGE PLANS:
 serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
 name: default.orc_merge5a
   Select Operator
-expressions: _col0 (type: bigint), _col1 (type: string), _col2 
(type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp), _col5 
(type: double)
+expressions: _col0 (type: bigint), _col1 (type: string), _col2 
(type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp), _col5 
(type: double)
 outputColumnNames: userid, string1, subtype, decimal1, ts, st
 Statistics: Num rows: 1 Data size: 2464020 Basic stats: 
COMPLETE Column stats: NONE
 Group By Operator
@@ -65,7 +65,7 @@ STAGE PLANS:
 sort order: +
 Map-reduce partition columns: _col0 (type: double)
 Statistics: Num rows: 1 Data size: 2464020 Basic stats: 
COMPLETE Column stats: NONE
-value expressions: _col1 (type: 
struct),
 _col2 (type: 
struct),
 _col3 (type: 
struct),
 _col4 (type: 
struct),
 _col5 (type: 
struct)
+value expressions: _col1 (type: 
struct),
 _col2 (type: 
struct),
 _col3 (type: 
struct),
 _col4 (type: 
struct),
 _col5 (type: 
struct)
   Reduce Operator Tree:
 Group By Operator
   aggregations: compute_stats(VALUE._col0), 
compute_stats(VALUE._col1), compute_stats(VALUE._col2), 
compute_stats(VALUE._col3), compute_stats(VALUE._col4)
@@ -74,7 +74,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
   Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE 
Column stats: NONE
   Select Operator
-expressions: _col1 (type: 
struct),
 _col2 (type: 
struct),
 _col3 (type: 
struct),
 _col4 (type: 
struct),
 _col5 (type: 
struct),
 _col0 (type: double)
+expressions: _col1 (type: 
struct),
 _col2 (type: 
struct),
 _col3 (type: 
struct),
 _col4 (type: 
struct),
 _col5 (type: 
struct),
 _col0 (type: double)
   

[12/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_mapjoin3.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin3.q.out 
b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin3.q.out
index f05e5c0..053826e 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin3.q.out
@@ -122,12 +122,12 @@ STAGE PLANS:
   Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE 
Column stats: COMPLETE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: 
[0:decimal0801_col:decimal(8,1), 1:int_col_1:int, 
2:ROW__ID:struct]
+  vectorizationSchemaColumns: 
[0:decimal0801_col:decimal(8,1)/DECIMAL_64, 1:int_col_1:int, 
2:ROW__ID:struct]
   Filter Operator
 Filter Vectorization:
 className: VectorFilterOperator
 native: true
-predicateExpression: FilterExprAndExpr(children: 
SelectColumnIsNotNull(col 0:decimal(8,1)), SelectColumnIsNotNull(col 1:int))
+predicateExpression: FilterExprAndExpr(children: 
SelectColumnIsNotNull(col 3:decimal(8,1))(children: 
ConvertDecimal64ToDecimal(col 0:decimal(8,1)/DECIMAL_64) -> 3:decimal(8,1)), 
SelectColumnIsNotNull(col 1:int))
 predicate: (decimal0801_col is not null and int_col_1 is 
not null) (type: boolean)
 Statistics: Num rows: 4 Data size: 464 Basic stats: 
COMPLETE Column stats: COMPLETE
 Select Operator
@@ -146,12 +146,13 @@ STAGE PLANS:
   1 _col0 (type: int)
 Map Join Vectorization:
 bigTableKeyColumnNums: [1]
-bigTableRetainedColumnNums: [0]
-bigTableValueColumnNums: [0]
+bigTableRetainedColumnNums: [3]
+bigTableValueColumnNums: [3]
+bigTableValueExpressions: 
ConvertDecimal64ToDecimal(col 0:decimal(8,1)/DECIMAL_64) -> 3:decimal(8,1)
 className: VectorMapJoinInnerBigOnlyLongOperator
 native: true
 nativeConditionsMet: 
hive.mapjoin.optimized.hashtable IS true, 
hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine 
tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS 
true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS 
true
-projectedOutputColumnNums: [0]
+projectedOutputColumnNums: [3]
 outputColumnNames: _col0
 input vertices:
   1 Reducer 3
@@ -171,8 +172,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -180,9 +181,9 @@ STAGE PLANS:
 rowBatchContext:
 dataColumnCount: 2
 includeColumns: [0, 1]
-dataColumns: decimal0801_col:decimal(8,1), int_col_1:int
+dataColumns: decimal0801_col:decimal(8,1)/DECIMAL_64, 
int_col_1:int
 partitionColumnCount: 0
-scratchColumnTypeNames: []
+scratchColumnTypeNames: [decimal(8,1)]
 Map 2 
 Map Operator Tree:
 TableScan
@@ -226,8 +227,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -325,12 +326,12 @@ STAGE PLANS:
   Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE 
Column stats: COMPLETE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: 

[17/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out 
b/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out
index 89c14d5..2b0a1e7 100644
--- 
a/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out
+++ 
b/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out
@@ -271,7 +271,7 @@ STAGE PLANS:
   Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE 
Column stats: COMPLETE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:c1:decimal(15,2), 
1:c2:decimal(15,2), 2:ROW__ID:struct]
+  vectorizationSchemaColumns: 
[0:c1:decimal(15,2)/DECIMAL_64, 1:c2:decimal(15,2)/DECIMAL_64, 
2:ROW__ID:struct]
   Select Operator
 expressions: c1 (type: decimal(15,2))
 outputColumnNames: c1
@@ -283,7 +283,7 @@ STAGE PLANS:
 Group By Operator
   aggregations: sum(c1)
   Group By Vectorization:
-  aggregators: VectorUDAFSumDecimal(col 
0:decimal(15,2)) -> decimal(25,2)
+  aggregators: VectorUDAFSumDecimal64ToDecimal(col 
0:decimal(15,2)/DECIMAL_64) -> decimal(25,2)
   className: VectorGroupByOperator
   groupByMode: HASH
   native: false
@@ -308,8 +308,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -317,7 +316,7 @@ STAGE PLANS:
 rowBatchContext:
 dataColumnCount: 2
 includeColumns: [0]
-dataColumns: c1:decimal(15,2), c2:decimal(15,2)
+dataColumns: c1:decimal(15,2)/DECIMAL_64, 
c2:decimal(15,2)/DECIMAL_64
 partitionColumnCount: 0
 scratchColumnTypeNames: []
 Reducer 2 
@@ -489,7 +488,7 @@ STAGE PLANS:
   Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE 
Column stats: COMPLETE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:c1:decimal(15,2), 
1:c2:decimal(15,2), 2:ROW__ID:struct]
+  vectorizationSchemaColumns: 
[0:c1:decimal(15,2)/DECIMAL_64, 1:c2:decimal(15,2)/DECIMAL_64, 
2:ROW__ID:struct]
   Select Operator
 expressions: c1 (type: decimal(15,2)), c2 (type: 
decimal(15,2))
 outputColumnNames: c1, c2
@@ -501,10 +500,10 @@ STAGE PLANS:
 Group By Operator
   aggregations: sum(c1)
   Group By Vectorization:
-  aggregators: VectorUDAFSumDecimal(col 
0:decimal(15,2)) -> decimal(25,2)
+  aggregators: VectorUDAFSumDecimal64ToDecimal(col 
0:decimal(15,2)/DECIMAL_64) -> decimal(25,2)
   className: VectorGroupByOperator
   groupByMode: HASH
-  keyExpressions: col 0:decimal(15,2), col 
1:decimal(15,2)
+  keyExpressions: ConvertDecimal64ToDecimal(col 
0:decimal(15,2)/DECIMAL_64) -> 3:decimal(15,2), ConvertDecimal64ToDecimal(col 
1:decimal(15,2)/DECIMAL_64) -> 4:decimal(15,2)
   native: false
   vectorProcessingMode: HASH
   projectedOutputColumnNums: [0]
@@ -530,8 +529,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -539,9 +537,9 @@ STAGE PLANS:
 rowBatchContext:
 dataColumnCount: 2
 includeColumns: [0, 1]
-

[08/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out 
b/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
index 395939a..c35156e 100644
--- a/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
@@ -166,8 +166,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out 
b/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
index bc9d102..c36c9ec 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
@@ -109,8 +109,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -178,8 +178,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -262,8 +262,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -331,8 +331,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out 
b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
index 16c1650..ecac4da 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
@@ -269,8 +269,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -338,8 +338,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 

[22/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out 
b/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
index deb9f67..1ef50ca 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
@@ -109,8 +109,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -226,8 +226,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -343,8 +343,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -460,8 +460,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -577,8 +577,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -694,8 +694,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -811,8 +811,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -928,8 +928,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -1045,8 +1045,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 

[23/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out 
b/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
index 6cd1e8d..30a6770 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
@@ -111,8 +111,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -291,8 +291,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -465,7 +465,7 @@ STAGE PLANS:
   Statistics: Num rows: 12289 Data size: 2662128 Basic stats: 
COMPLETE Column stats: NONE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:cdouble:double, 
1:cdecimal1:decimal(11,5), 2:cdecimal2:decimal(16,0), 3:cint:int, 
4:ROW__ID:struct]
+  vectorizationSchemaColumns: [0:cdouble:double, 
1:cdecimal1:decimal(11,5)/DECIMAL_64, 2:cdecimal2:decimal(16,0)/DECIMAL_64, 
3:cint:int, 4:ROW__ID:struct]
   Select Operator
 expressions: cdecimal1 (type: decimal(11,5)), cdecimal2 
(type: decimal(16,0)), cint (type: int)
 outputColumnNames: cdecimal1, cdecimal2, cint
@@ -477,7 +477,7 @@ STAGE PLANS:
 Group By Operator
   aggregations: count(cdecimal1), max(cdecimal1), 
min(cdecimal1), sum(cdecimal1), count(cdecimal2), max(cdecimal2), 
min(cdecimal2), sum(cdecimal2), count()
   Group By Vectorization:
-  aggregators: VectorUDAFCount(col 1:decimal(11,5)) -> 
bigint, VectorUDAFMaxDecimal(col 1:decimal(11,5)) -> decimal(11,5), 
VectorUDAFMinDecimal(col 1:decimal(11,5)) -> decimal(11,5), 
VectorUDAFSumDecimal(col 1:decimal(11,5)) -> decimal(21,5), VectorUDAFCount(col 
2:decimal(16,0)) -> bigint, VectorUDAFMaxDecimal(col 2:decimal(16,0)) -> 
decimal(16,0), VectorUDAFMinDecimal(col 2:decimal(16,0)) -> decimal(16,0), 
VectorUDAFSumDecimal(col 2:decimal(16,0)) -> decimal(26,0), 
VectorUDAFCountStar(*) -> bigint
+  aggregators: VectorUDAFCount(col 
1:decimal(11,5)/DECIMAL_64) -> bigint, VectorUDAFMaxDecimal64(col 
1:decimal(11,5)/DECIMAL_64) -> decimal(11,5)/DECIMAL_64, 
VectorUDAFMinDecimal64(col 1:decimal(11,5)/DECIMAL_64) -> 
decimal(11,5)/DECIMAL_64, VectorUDAFSumDecimal64ToDecimal(col 
1:decimal(11,5)/DECIMAL_64) -> decimal(21,5), VectorUDAFCount(col 
2:decimal(16,0)/DECIMAL_64) -> bigint, VectorUDAFMaxDecimal64(col 
2:decimal(16,0)/DECIMAL_64) -> decimal(16,0)/DECIMAL_64, 
VectorUDAFMinDecimal64(col 2:decimal(16,0)/DECIMAL_64) -> 
decimal(16,0)/DECIMAL_64, VectorUDAFSumDecimal64ToDecimal(col 
2:decimal(16,0)/DECIMAL_64) -> decimal(26,0), VectorUDAFCountStar(*) -> bigint
   className: VectorGroupByOperator
   groupByMode: HASH
   keyExpressions: col 3:int
@@ -506,8 +506,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -515,7 +514,7 @@ STAGE PLANS:
 rowBatchContext:
 dataColumnCount: 4
 includeColumns: [1, 2, 3]
-dataColumns: cdouble:double, cdecimal1:decimal(11,5), 
cdecimal2:decimal(16,0), cint:int
+dataColumns: cdouble:double, 
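The vector_decimal_aggregate hunks above show the aggregation side of the same change: count/min/max/sum over decimal(11,5) and decimal(16,0) columns now use the Decimal64 aggregators (VectorUDAFMaxDecimal64, VectorUDAFMinDecimal64, VectorUDAFSumDecimal64ToDecimal), which operate on the scaled longs directly and produce a full-precision decimal result. A rough sketch of that sum-then-widen idea, assuming all inputs share one scale (illustrative only, not the Hive operator):

import java.math.BigDecimal;

// Rough sketch only (not Hive's VectorUDAFSumDecimal64ToDecimal): sum a batch of
// DECIMAL_64 values (unscaled longs at a common scale) and widen to BigDecimal
// if the long accumulator would overflow.
final class Decimal64SumSketch {
    static BigDecimal sum(long[] unscaled, int scale) {
        long acc = 0L;
        BigDecimal wide = null;                       // used only after overflow
        for (long v : unscaled) {
            if (wide == null) {
                try {
                    acc = Math.addExact(acc, v);      // stay on the fast long path
                } catch (ArithmeticException overflow) {
                    wide = BigDecimal.valueOf(acc, scale)
                                     .add(BigDecimal.valueOf(v, scale));
                }
            } else {
                wide = wide.add(BigDecimal.valueOf(v, scale));
            }
        }
        return wide != null ? wide : BigDecimal.valueOf(acc, scale);
    }

    public static void main(String[] args) {
        // 1.1 + 2.2 + 3.3 at scale 1
        System.out.println(sum(new long[] {11L, 22L, 33L}, 1)); // prints 6.6
    }
}

The try/catch around Math.addExact stands in for the overflow check that forces the accumulator out of the long fast path in the real vectorized aggregator.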

[01/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline) [Forced Update!]

2018-06-18 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/master-txnstats be3039587 -> 1d46608e8 (forced update)


http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
--
diff --git a/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out 
b/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
index 688d0ed..84f9573 100644
--- a/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
@@ -284,8 +284,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false
@@ -572,8 +572,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false
@@ -864,8 +864,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false
@@ -1134,8 +1134,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false
@@ -1258,8 +1258,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out
--
diff --git a/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out 
b/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out
index 3a1c0e7..51af71a 100644
--- a/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out
@@ -111,8 +111,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
--
diff --git a/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out 
b/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
index e9a0e45..3b775a1 100644
--- a/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
@@ -132,8 +132,8 @@ STAGE PLANS:
   Map Vectorization:
   enabled: true
   enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS 
true
-  inputFormatFeatureSupport: []
-  featureSupportInUse: []
+  inputFormatFeatureSupport: [DECIMAL_64]
+  featureSupportInUse: [DECIMAL_64]
   inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
   allNative: false
   usesVectorUDFAdaptor: false


[14/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out 
b/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out
index e3d52d2..93b8655 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE 
Column stats: NONE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 
2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 
8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 
11:ROW__ID:struct]
+  vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 
2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 
8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 
11:ROW__ID:struct]
   Reduce Output Operator
 key expressions: i (type: int), s (type: string), b (type: 
bigint)
 sort order: +++
@@ -93,8 +93,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -102,7 +101,7 @@ STAGE PLANS:
 rowBatchContext:
 dataColumnCount: 11
 includeColumns: [2, 3, 7]
-dataColumns: t:tinyint, si:smallint, i:int, b:bigint, 
f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), 
bin:binary
+dataColumns: t:tinyint, si:smallint, i:int, b:bigint, 
f:float, d:double, bo:boolean, s:string, ts:timestamp, 
dec:decimal(10,0)/DECIMAL_64, bin:binary
 partitionColumnCount: 0
 scratchColumnTypeNames: []
 Reducer 2 
@@ -297,7 +296,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE 
Column stats: NONE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 
2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 
8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 
11:ROW__ID:struct]
+  vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 
2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 
8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 
11:ROW__ID:struct]
   Reduce Output Operator
 key expressions: d (type: double), s (type: string), f 
(type: float)
 sort order: +++
@@ -316,8 +315,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: true
 usesVectorUDFAdaptor: false
@@ -325,7 +323,7 @@ STAGE PLANS:
 rowBatchContext:
 dataColumnCount: 11
 includeColumns: [4, 5, 7]
-dataColumns: t:tinyint, si:smallint, i:int, b:bigint, 
f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), 
bin:binary
+dataColumns: t:tinyint, si:smallint, i:int, b:bigint, 
f:float, d:double, bo:boolean, s:string, ts:timestamp, 
dec:decimal(10,0)/DECIMAL_64, bin:binary
 partitionColumnCount: 0
 scratchColumnTypeNames: []
 Reducer 2 
@@ -520,7 +518,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 228 Basic stats: COMPLETE 
Column stats: NONE
   TableScan Vectorization:
   native: true
-  vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 
2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 
8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 
11:ROW__ID:struct]

[13/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out 
b/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
index 8ad2017..c66f3d4 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
@@ -288,8 +288,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -580,8 +580,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -876,8 +876,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -1150,8 +1150,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -1280,8 +1280,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out 
b/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
index de3c6e6..7e78360 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
@@ -93,8 +93,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -226,8 +226,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false


[27/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_llap_io.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_llap_io.q.out
 
b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_llap_io.q.out
index 7259b33..36b53e5 100644
--- 
a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_llap_io.q.out
+++ 
b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_llap_io.q.out
@@ -88,8 +88,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -200,8 +199,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -314,8 +312,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -439,8 +436,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -590,8 +586,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -736,8 +731,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -875,8 +869,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -1028,8 +1021,7 @@ STAGE PLANS:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
 inputFormatFeatureSupport: [DECIMAL_64]
-vectorizationSupportRemovedReasons: [DECIMAL_64 disabled 
because LLAP is enabled]
-featureSupportInUse: []
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 

[25/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_case_when_2.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_case_when_2.q.out 
b/ql/src/test/results/clientpositive/llap/vector_case_when_2.q.out
index b29fd4b..9be5235 100644
--- a/ql/src/test/results/clientpositive/llap/vector_case_when_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_case_when_2.q.out
@@ -426,8 +426,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: true
@@ -711,8 +711,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: true
 usesVectorUDFAdaptor: true

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out 
b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
index bbaa05c..f801856 100644
--- a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
@@ -171,8 +171,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out 
b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
index be7c367..73e8060 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
@@ -132,8 +132,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false
@@ -328,8 +328,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-featureSupportInUse: []
+inputFormatFeatureSupport: [DECIMAL_64]
+featureSupportInUse: [DECIMAL_64]
 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 allNative: false
 usesVectorUDFAdaptor: false

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_4.q.out 
b/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
index ca4acf1..a418e7a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
@@ -174,8 +174,8 @@ STAGE PLANS:
 Map Vectorization:
 enabled: true
 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-inputFormatFeatureSupport: []
-

[02/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)

2018-06-18 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_outer_join6.q.out
--
diff --git a/ql/src/test/results/clientpositive/vector_outer_join6.q.out 
b/ql/src/test/results/clientpositive/vector_outer_join6.q.out
index e2d6cc8..7151965 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join6.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join6.q.out
@@ -130,7 +130,7 @@ POSTHOOK: query: explain vectorization detail formatted
 select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from
(select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 
from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj 
left outer join tjoin3 on tj2c1 = tjoin3.c1
 POSTHOOK: type: QUERY
-{"PLAN 
VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled
 IS true"]},"STAGE DEPENDENCIES":{"Stage-7":{"ROOT 
STAGE":"TRUE"},"Stage-5":{"DEPENDENT STAGES":"Stage-7"},"Stage-0":{"DEPENDENT 
STAGES":"Stage-5"}},"STAGE PLANS":{"Stage-7":{"Map Reduce Local Work":{"Alias 
-> Map Local Tables:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"Fetch 
Operator":{"limit:":"-1"}},"$hdt$_1:tjoin3":{"Fetch 
Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator 
Tree:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"TableScan":{"alias:":"tjoin2_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num
 rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: 
NONE","table:":"tjoin2_n0","isTempTable:":"false","OperatorId:":"TS_0","children":{"Select
 Operator":{"expressions:":"rnum (type: int), c1 (type: 
int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num
 rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","Operato
 rId:":"SEL_1","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 
(type: int)","1":"_col1 (type: 
int)"},"OperatorId:":"HASHTABLESINK_21"}},"$hdt$_1:tjoin3":{"TableScan":{"alias:":"tjoin3","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num
 rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: 
NONE","table:":"tjoin3","isTempTable:":"false","OperatorId:":"TS_8","children":{"Select
 Operator":{"expressions:":"rnum (type: int), c1 (type: 
int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num
 rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: 
NONE","OperatorId:":"SEL_9","children":{"HashTable Sink 
Operator":{"keys:":{"0":"_col2 (type: int)","1":"_col1 (type: 
int)"},"OperatorId:":"HASHTABLESINK_19"},"Stage-5":{"Map Reduce":{"Map 
Operator 
Tree:":[{"TableScan":{"alias:":"tjoin1_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num
 rows: 3 Data size: 32 Basic stats: COMPLETE C
 olumn stats: NONE","table:":"tjoin1_n0","TableScan 
Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:rnum:int, 
1:c1:int, 2:c2:int, 
3:ROW__ID:struct]"},"isTempTable:":"false","OperatorId:":"TS_2","children":{"Select
 Operator":{"expressions:":"rnum (type: int), c1 (type: 
int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Select
 
Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0,
 1]"},"Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column 
stats: NONE","OperatorId:":"SEL_23","children":{"Map Join 
Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col2":"1:_col0"},"condition
 map:":[{"":"Right Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: 
int)","1":"_col1 (type: int)"},"Map Join 
Vectorization:":{"bigTableKeyExpressions:":["col 
1:int"],"bigTableValueExpressions:":["col 0:int"],"className:":"VectorMapJoinOp
 
erator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable
 IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One 
MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS 
true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS 
true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS 
false"]},"outputColumnNames:":["_col0","_col1","_col2"],"Statistics:":"Num 
rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: 
NONE","OperatorId:":"MAPJOIN_24","children":{"Select 
Operator":{"expressions:":"_col2 (type: int), _col0 (type: int), _col1 (type: 
int)","columnExprMap:":{"_col0":"_col2","_col1":"_col0","_col2":"_col1"},"outputColumnNames:":["_col0","_col1","_col2"],"Select
 
Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2,
 0, 1]"},"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column 
stats: NONE","OperatorId:":"SE
 L_25","children":{"Map Join 
Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col3":"1:_col0"},"condition
 map:":[{"":"Left Outer 

hive git commit: HIVE-19853: Arrow serializer needs to create a TimeStampMicroTZVector instead of TimeStampMicroVector (Teddy Choi, reviewed by Matt McCline)

2018-06-18 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/branch-3 1998bfbda -> 6c329a297


HIVE-19853: Arrow serializer needs to create a TimeStampMicroTZVector instead 
of TimeStampMicroVector (Teddy Choi, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6c329a29
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6c329a29
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6c329a29

Branch: refs/heads/branch-3
Commit: 6c329a297a295b22fcda1f5560eb6d07ebbb5522
Parents: 1998bfb
Author: Matt McCline 
Authored: Mon Jun 18 15:55:00 2018 -0500
Committer: Matt McCline 
Committed: Mon Jun 18 15:55:54 2018 -0500

--
 .../hadoop/hive/ql/io/arrow/Deserializer.java   | 94 +++-
 .../hadoop/hive/ql/io/arrow/Serializer.java | 15 ++--
 2 files changed, 40 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/6c329a29/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
index 6e09d39..edc4b39 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
@@ -29,9 +29,7 @@ import org.apache.arrow.vector.IntVector;
 import org.apache.arrow.vector.IntervalDayVector;
 import org.apache.arrow.vector.IntervalYearVector;
 import org.apache.arrow.vector.SmallIntVector;
-import org.apache.arrow.vector.TimeStampMicroVector;
-import org.apache.arrow.vector.TimeStampMilliVector;
-import org.apache.arrow.vector.TimeStampNanoVector;
+import org.apache.arrow.vector.TimeStampVector;
 import org.apache.arrow.vector.TinyIntVector;
 import org.apache.arrow.vector.VarBinaryVector;
 import org.apache.arrow.vector.VarCharVector;
@@ -268,35 +266,11 @@ class Deserializer {
 }
 break;
   case TIMESTAMPMILLI:
-{
-  for (int i = 0; i < size; i++) {
-if (arrowVector.isNull(i)) {
-  VectorizedBatchUtil.setNullColIsNullValue(hiveVector, i);
-} else {
-  hiveVector.isNull[i] = false;
-
-  // Time = second + sub-second
-  final long timeInMillis = ((TimeStampMilliVector) 
arrowVector).get(i);
-  final TimestampColumnVector timestampColumnVector = 
(TimestampColumnVector) hiveVector;
-  int subSecondInNanos = (int) ((timeInMillis % MILLIS_PER_SECOND) 
* NS_PER_MILLIS);
-  long second = timeInMillis / MILLIS_PER_SECOND;
-
-  // A nanosecond value should not be negative
-  if (subSecondInNanos < 0) {
-
-// So add one second to the negative nanosecond value to make 
it positive
-subSecondInNanos += NS_PER_SECOND;
-
-// Subtract one second from the second value because we added 
one second
-second -= 1;
-  }
-  timestampColumnVector.time[i] = second * MILLIS_PER_SECOND;
-  timestampColumnVector.nanos[i] = subSecondInNanos;
-}
-  }
-}
-break;
+  case TIMESTAMPMILLITZ:
   case TIMESTAMPMICRO:
+  case TIMESTAMPMICROTZ:
+  case TIMESTAMPNANO:
+  case TIMESTAMPNANOTZ:
 {
   for (int i = 0; i < size; i++) {
 if (arrowVector.isNull(i)) {
@@ -305,40 +279,36 @@ class Deserializer {
   hiveVector.isNull[i] = false;
 
   // Time = second + sub-second
-  final long timeInMicros = ((TimeStampMicroVector) 
arrowVector).get(i);
-  final TimestampColumnVector timestampColumnVector = 
(TimestampColumnVector) hiveVector;
-  int subSecondInNanos = (int) ((timeInMicros % MICROS_PER_SECOND) 
* NS_PER_MICROS);
-  long second = timeInMicros / MICROS_PER_SECOND;
-
-  // A nanosecond value should not be negative
-  if (subSecondInNanos < 0) {
-
-// So add one second to the negative nanosecond value to make 
it positive
-subSecondInNanos += NS_PER_SECOND;
-
-// Subtract one second from the second value because we added 
one second
-second -= 1;
+  final long time = ((TimeStampVector) arrowVector).get(i);
+  long second;
+  int subSecondInNanos;
+  switch (minorType) {
+case TIMESTAMPMILLI:
+case TIMESTAMPMILLITZ:
+  {
+subSecondInNanos = (int) ((time % MILLIS_PER_SECOND) * 
NS_PER_MILLIS);
+second = time / MILLIS_PER_SECOND;
+  }
+  
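
For readers following the timestamp rework above: a minimal standalone sketch of the 
second/sub-second split the reworked Deserializer now applies uniformly to every Arrow 
timestamp unit. The raw epoch value is divided into whole seconds plus a non-negative 
nanosecond remainder, borrowing one second when the value lies before the epoch. The 
class below is illustrative only and is not part of the commit; the constant values 
mirror the MILLIS_PER_SECOND / NS_PER_MILLIS / NS_PER_SECOND constants referenced in 
the patch.

    // Editorial sketch, not part of the commit: the borrow-one-second trick for
    // epoch values before 1970, shown here for the millisecond unit.
    public final class TimestampSplitSketch {
      private static final long MILLIS_PER_SECOND = 1_000L;
      private static final long NS_PER_MILLIS = 1_000_000L;
      private static final long NS_PER_SECOND = 1_000_000_000L;

      /** Returns {seconds, subSecondNanos} for an epoch value given in milliseconds. */
      static long[] splitMillis(long timeInMillis) {
        int subSecondInNanos = (int) ((timeInMillis % MILLIS_PER_SECOND) * NS_PER_MILLIS);
        long second = timeInMillis / MILLIS_PER_SECOND;
        if (subSecondInNanos < 0) {
          subSecondInNanos += NS_PER_SECOND;  // push the remainder into [0, 1s)
          second -= 1;                        // ...and pay for it with one second
        }
        return new long[] { second, subSecondInNanos };
      }

      public static void main(String[] args) {
        long[] parts = splitMillis(-1L);      // 1 ms before the epoch
        System.out.println(parts[0] + " s + " + parts[1] + " ns");  // -1 s + 999000000 ns
      }
    }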

hive git commit: HIVE-19853: Arrow serializer needs to create a TimeStampMicroTZVector instead of TimeStampMicroVector (Teddy Choi, reviewed by Matt McCline)

2018-06-18 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/master c4eb647c6 -> 1a610cc54


HIVE-19853: Arrow serializer needs to create a TimeStampMicroTZVector instead 
of TimeStampMicroVector (Teddy Choi, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1a610cc5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1a610cc5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1a610cc5

Branch: refs/heads/master
Commit: 1a610cc545d39b9e9116c5b90108197853d0364c
Parents: c4eb647
Author: Matt McCline 
Authored: Mon Jun 18 15:55:00 2018 -0500
Committer: Matt McCline 
Committed: Mon Jun 18 15:55:00 2018 -0500

--
 .../hadoop/hive/ql/io/arrow/Deserializer.java   | 94 +++-
 .../hadoop/hive/ql/io/arrow/Serializer.java | 15 ++--
 2 files changed, 40 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1a610cc5/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
index 6e09d39..edc4b39 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
@@ -29,9 +29,7 @@ import org.apache.arrow.vector.IntVector;
 import org.apache.arrow.vector.IntervalDayVector;
 import org.apache.arrow.vector.IntervalYearVector;
 import org.apache.arrow.vector.SmallIntVector;
-import org.apache.arrow.vector.TimeStampMicroVector;
-import org.apache.arrow.vector.TimeStampMilliVector;
-import org.apache.arrow.vector.TimeStampNanoVector;
+import org.apache.arrow.vector.TimeStampVector;
 import org.apache.arrow.vector.TinyIntVector;
 import org.apache.arrow.vector.VarBinaryVector;
 import org.apache.arrow.vector.VarCharVector;
@@ -268,35 +266,11 @@ class Deserializer {
 }
 break;
   case TIMESTAMPMILLI:
-{
-  for (int i = 0; i < size; i++) {
-if (arrowVector.isNull(i)) {
-  VectorizedBatchUtil.setNullColIsNullValue(hiveVector, i);
-} else {
-  hiveVector.isNull[i] = false;
-
-  // Time = second + sub-second
-  final long timeInMillis = ((TimeStampMilliVector) 
arrowVector).get(i);
-  final TimestampColumnVector timestampColumnVector = 
(TimestampColumnVector) hiveVector;
-  int subSecondInNanos = (int) ((timeInMillis % MILLIS_PER_SECOND) 
* NS_PER_MILLIS);
-  long second = timeInMillis / MILLIS_PER_SECOND;
-
-  // A nanosecond value should not be negative
-  if (subSecondInNanos < 0) {
-
-// So add one second to the negative nanosecond value to make 
it positive
-subSecondInNanos += NS_PER_SECOND;
-
-// Subtract one second from the second value because we added 
one second
-second -= 1;
-  }
-  timestampColumnVector.time[i] = second * MILLIS_PER_SECOND;
-  timestampColumnVector.nanos[i] = subSecondInNanos;
-}
-  }
-}
-break;
+  case TIMESTAMPMILLITZ:
   case TIMESTAMPMICRO:
+  case TIMESTAMPMICROTZ:
+  case TIMESTAMPNANO:
+  case TIMESTAMPNANOTZ:
 {
   for (int i = 0; i < size; i++) {
 if (arrowVector.isNull(i)) {
@@ -305,40 +279,36 @@ class Deserializer {
   hiveVector.isNull[i] = false;
 
   // Time = second + sub-second
-  final long timeInMicros = ((TimeStampMicroVector) 
arrowVector).get(i);
-  final TimestampColumnVector timestampColumnVector = 
(TimestampColumnVector) hiveVector;
-  int subSecondInNanos = (int) ((timeInMicros % MICROS_PER_SECOND) 
* NS_PER_MICROS);
-  long second = timeInMicros / MICROS_PER_SECOND;
-
-  // A nanosecond value should not be negative
-  if (subSecondInNanos < 0) {
-
-// So add one second to the negative nanosecond value to make 
it positive
-subSecondInNanos += NS_PER_SECOND;
-
-// Subtract one second from the second value because we added 
one second
-second -= 1;
+  final long time = ((TimeStampVector) arrowVector).get(i);
+  long second;
+  int subSecondInNanos;
+  switch (minorType) {
+case TIMESTAMPMILLI:
+case TIMESTAMPMILLITZ:
+  {
+subSecondInNanos = (int) ((time % MILLIS_PER_SECOND) * 
NS_PER_MILLIS);
+second = time / MILLIS_PER_SECOND;
+  }
+  

hive git commit: HIVE-19569: alter table db1.t1 rename db2.t2 generates MetaStoreEventListener.onDropTable() (Mahesh Kumar Behera, reviewed by Sankar Hariappan)

2018-06-18 Thread sankarh
Repository: hive
Updated Branches:
  refs/heads/branch-3 90b442c1b -> 1998bfbda


HIVE-19569: alter table db1.t1 rename db2.t2 generates 
MetaStoreEventListener.onDropTable() (Mahesh Kumar Behera, reviewed by Sankar 
Hariappan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1998bfbd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1998bfbd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1998bfbd

Branch: refs/heads/branch-3
Commit: 1998bfbdaff59b9e01f037f502376ad5d2d6e73a
Parents: 90b442c
Author: Sankar Hariappan 
Authored: Mon Jun 18 13:34:32 2018 -0700
Committer: Sankar Hariappan 
Committed: Mon Jun 18 13:34:32 2018 -0700

--
 .../hadoop/hive/ql/TestTxnConcatenate.java  | 24 +++--
 .../hadoop/hive/metastore/HiveAlterHandler.java | 99 
 .../hadoop/hive/metastore/HiveMetaStore.java| 31 +++---
 .../TestTablesCreateDropAlterTruncate.java  |  2 +-
 4 files changed, 47 insertions(+), 109 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1998bfbd/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java 
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
index 511198a..0e436e1 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -225,14 +224,19 @@ public class TestTxnConcatenate extends 
TxnCommandsBaseForTests {
 Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
 "select count(*) from NEXT_WRITE_ID where NWI_TABLE='s'"));
 
-//this causes MetaStoreEvenListener.onDropTable()/onCreateTable() to 
execute and the data
-//files are just moved under new table.  This can't work since a drop 
table in Acid removes
-//the relevant table metadata (like writeid, etc.), so writeIds in file 
names/ROW_IDs
-//no longer make sense.  (In fact 'select ...' returns nothing since there 
is no NEXT_WRITE_ID
-//entry for the 'new' table and all existing data is 'above HWM'. see 
HIVE-19569
-CommandProcessorResponse cpr =
-runStatementOnDriverNegative("alter table mydb1.S RENAME TO 
mydb2.bar");
-Assert.assertTrue(cpr.getErrorMessage() != null && cpr.getErrorMessage()
-.contains("Changing database name of a transactional table mydb1.s is 
not supported."));
+runStatementOnDriver("alter table mydb1.S RENAME TO mydb2.bar");
+
+Assert.assertEquals(
+TxnDbUtil.queryToString(hiveConf, "select * from 
COMPLETED_TXN_COMPONENTS"), 2,
+TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from COMPLETED_TXN_COMPONENTS where 
CTC_TABLE='bar'"));
+Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from COMPACTION_QUEUE where CQ_TABLE='bar'"));
+Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from WRITE_SET where WS_TABLE='bar'"));
+Assert.assertEquals(2, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from TXN_TO_WRITE_ID where T2W_TABLE='bar'"));
+Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from NEXT_WRITE_ID where NWI_TABLE='bar'"));
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/1998bfbd/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index ed53c90..f328ad1 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -22,11 +22,8 @@ import com.google.common.collect.Lists;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import 

hive git commit: Revert "Merge branch 'branch-3' of http://git-wip-us.apache.org/repos/asf/hive into branch-3"

2018-06-18 Thread djaiswal
Repository: hive
Updated Branches:
  refs/heads/branch-3 a2c08792a -> 90b442c1b


Revert "Merge branch 'branch-3' of http://git-wip-us.apache.org/repos/asf/hive 
into branch-3"

This reverts commit 9d80c2d6a1a7bf378d2c6a28006d09f3c3643e5f, reversing
changes made to 6f9a76744ea3acb015a72e8d1e94a335ac80f42f.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/90b442c1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/90b442c1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/90b442c1

Branch: refs/heads/branch-3
Commit: 90b442c1b54f786558e04cec60715756e7accd1d
Parents: a2c0879
Author: Deepak Jaiswal 
Authored: Mon Jun 18 10:57:53 2018 -0700
Committer: Deepak Jaiswal 
Committed: Mon Jun 18 10:57:53 2018 -0700

--
 .../TestTransactionalValidationListener.java| 127 ---
 .../TransactionalValidationListener.java|  23 +---
 .../metastore/client/MetaStoreClientTest.java   |   2 +-
 3 files changed, 6 insertions(+), 146 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/90b442c1/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTransactionalValidationListener.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTransactionalValidationListener.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTransactionalValidationListener.java
deleted file mode 100644
index 3aaad22..000
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTransactionalValidationListener.java
+++ /dev/null
@@ -1,127 +0,0 @@
-package org.apache.hadoop.hive.metastore;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.client.MetaStoreClientTest;
-import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-@RunWith(Parameterized.class)
-public class TestTransactionalValidationListener extends MetaStoreClientTest {
-
-  private AbstractMetaStoreService metaStore;
-  private IMetaStoreClient client;
-  private boolean createdCatalogs = false;
-
-  @BeforeClass
-  public static void startMetaStores() {
-Map msConf = new 
HashMap();
-
-// Enable TransactionalValidationListener + create.as.acid
-Map extraConf = new HashMap<>();
-extraConf.put("metastore.create.as.acid", "true");
-extraConf.put("hive.txn.manager", 
"org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
-extraConf.put("hive.support.concurrency", "true");
-startMetaStores(msConf, extraConf);
-  }
-
-  @Before
-  public void setUp() throws Exception {
-// Get new client
-client = metaStore.getClient();
-if (!createdCatalogs) {
-  createCatalogs();
-  createdCatalogs = true;
-}
-  }
-
-  @After
-  public void tearDown() throws Exception {
-try {
-  if (client != null) {
-client.close();
-  }
-} finally {
-  client = null;
-}
-  }
-
-  public TestTransactionalValidationListener(String name, 
AbstractMetaStoreService metaStore) throws Exception {
-this.metaStore = metaStore;
-  }
-
-  private void createCatalogs() throws Exception {
-String[] catNames = {"spark", "myapp"};
-String[] location = {MetaStoreTestUtils.getTestWarehouseDir("spark"),
- MetaStoreTestUtils.getTestWarehouseDir("myapp")};
-
-for (int i = 0; i < catNames.length; i++) {
-  Catalog cat = new CatalogBuilder()
-  .setName(catNames[i])
-  .setLocation(location[i])
-  .build();
-  client.createCatalog(cat);
-  File dir = new File(cat.getLocationUri());
-  Assert.assertTrue(dir.exists() && dir.isDirectory());
-}
-  }
-
-  private Table 

hive git commit: HIVE-19921: Fix perf duration and queue name in HiveProtoLoggingHook (Harish JP, reviewd by Anishek Agarwal)

2018-06-18 Thread anishek
Repository: hive
Updated Branches:
  refs/heads/master 4810511d6 -> c4eb647c6


HIVE-19921: Fix perf duration and queue name in HiveProtoLoggingHook (Harish 
JP, reviewed by Anishek Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c4eb647c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c4eb647c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c4eb647c

Branch: refs/heads/master
Commit: c4eb647c6da499541ce178bf82433c26f25e
Parents: 4810511
Author: Anishek Agarwal 
Authored: Mon Jun 18 09:08:34 2018 -0700
Committer: Anishek Agarwal 
Committed: Mon Jun 18 09:08:34 2018 -0700

--
 .../hive/ql/hooks/HiveProtoLoggingHook.java |  6 +++-
 .../hive/ql/hooks/TestHiveProtoLoggingHook.java | 29 +++-
 2 files changed, 27 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/c4eb647c/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java 
b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
index eef6ac9..bddca1a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
@@ -366,6 +366,7 @@ public class HiveProtoLoggingHook implements 
ExecuteWithHookContext {
   ApplicationId llapId = determineLlapId(conf, executionMode);
   if (llapId != null) {
 addMapEntry(builder, OtherInfoType.LLAP_APP_ID, llapId.toString());
+
builder.setQueue(conf.get(HiveConf.ConfVars.LLAP_DAEMON_QUEUE_NAME.varname));
   }
 
   conf.stripHiddenConfigurations(conf);
@@ -391,7 +392,10 @@ public class HiveProtoLoggingHook implements 
ExecuteWithHookContext {
 builder.setOperationId(hookContext.getOperationId());
   }
   addMapEntry(builder, OtherInfoType.STATUS, Boolean.toString(success));
-  JSONObject perfObj = new 
JSONObject(hookContext.getPerfLogger().getEndTimes());
+  JSONObject perfObj = new JSONObject();
+  for (String key : hookContext.getPerfLogger().getEndTimes().keySet()) {
+perfObj.put(key, hookContext.getPerfLogger().getDuration(key));
+  }
   addMapEntry(builder, OtherInfoType.PERF, perfObj.toString());
 
   return builder.build();
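
The PERF fix above swaps absolute end timestamps for elapsed durations: getEndTimes() 
holds the wall-clock instant each stage finished, while getDuration(key) yields end 
minus start in milliseconds, which is what consumers of the proto event actually want. 
A small self-contained sketch of that difference follows; it is not part of the commit 
and all values are invented for illustration.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Editorial sketch, not part of the commit; the timestamps below are made up.
    public final class PerfMapSketch {
      public static void main(String[] args) {
        Map<String, Long> startTimes = new LinkedHashMap<>();
        Map<String, Long> endTimes = new LinkedHashMap<>();
        startTimes.put("LogTest", 1_529_340_000_116L);
        endTimes.put("LogTest", 1_529_340_000_123L);

        // Old behaviour: the PERF entry carried the raw end times (absolute epoch millis).
        System.out.println("old PERF entry: " + endTimes);

        // New behaviour: per-key durations, i.e. what getDuration(key) = end - start returns.
        Map<String, Long> durations = new LinkedHashMap<>();
        endTimes.forEach((key, end) -> durations.put(key, end - startTimes.get(key)));
        System.out.println("new PERF entry: " + durations);  // {LogTest=7}
      }
    }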

http://git-wip-us.apache.org/repos/asf/hive/blob/c4eb647c/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java 
b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
index 98b73e8..96fb73c 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
@@ -22,6 +22,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.Map;
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -46,6 +47,9 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
 
 public class TestHiveProtoLoggingHook {
 
@@ -106,6 +110,8 @@ public class TestHiveProtoLoggingHook {
   @Test
   public void testPostEventLog() throws Exception {
 context.setHookType(HookType.POST_EXEC_HOOK);
+context.getPerfLogger().PerfLogBegin("test", "LogTest");
+context.getPerfLogger().PerfLogEnd("test", "LogTest");
 
 EventLogger evtLogger = new EventLogger(conf, SystemClock.getInstance());
 evtLogger.handle(context);
@@ -119,7 +125,11 @@ public class TestHiveProtoLoggingHook {
 Assert.assertEquals("test_op_id", event.getOperationId());
 
 assertOtherInfo(event, OtherInfoType.STATUS, Boolean.TRUE.toString());
-assertOtherInfo(event, OtherInfoType.PERF, null);
+String val = findOtherInfo(event, OtherInfoType.PERF);
+Map map = new ObjectMapper().readValue(val,
+new TypeReference>() {});
+// This should be really close to zero.
+Assert.assertTrue("Expected LogTest in PERF", map.get("LogTest") < 100);
   }
 
   @Test
@@ -158,15 +168,20 @@ public class TestHiveProtoLoggingHook {
 return event;
   }
 
-  private void assertOtherInfo(HiveHookEventProto event, OtherInfoType key, 
String value) {
+  private String findOtherInfo(HiveHookEventProto event, OtherInfoType key) {
 for (MapFieldEntry otherInfo : event.getOtherInfoList()) {
   if (otherInfo.getKey().equals(key.name())) {
-if 

hive git commit: HIVE-19786: RpcServer cancelTask log message is incorrect (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar)

2018-06-18 Thread stakiar
Repository: hive
Updated Branches:
  refs/heads/master 8c0767625 -> 4810511d6


HIVE-19786: RpcServer cancelTask log message is incorrect (Bharathkrishna 
Guruvayoor Murali, reviewed by Sahil Takiar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4810511d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4810511d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4810511d

Branch: refs/heads/master
Commit: 4810511d6e2b4377b20d70122788d5ad300d8df1
Parents: 8c07676
Author: Bharathkrishna Guruvayoor Murali 
Authored: Mon Jun 18 10:17:11 2018 -0500
Committer: Sahil Takiar 
Committed: Mon Jun 18 10:17:11 2018 -0500

--
 .../src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4810511d/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java
--
diff --git 
a/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java 
b/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java
index f1383d6..babcb54 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java
@@ -101,7 +101,8 @@ public class RpcServer implements Closeable {
 Runnable cancelTask = new Runnable() {
 @Override
 public void run() {
-  LOG.warn("Timed out waiting for test message from Remote 
Spark driver.");
+  LOG.warn("Timed out waiting for the completion of SASL 
negotiation "
+  + "between HiveServer2 and the Remote Spark 
Driver.");
   newRpc.close();
 }
 };



hive git commit: HIVE-19787: Log message when spark-submit has completed (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar)

2018-06-18 Thread stakiar
Repository: hive
Updated Branches:
  refs/heads/master c89cf6d5d -> 8c0767625


HIVE-19787: Log message when spark-submit has completed (Bharathkrishna 
Guruvayoor Murali, reviewed by Sahil Takiar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8c076762
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8c076762
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8c076762

Branch: refs/heads/master
Commit: 8c0767625069418871194f418b99bce8cca1007b
Parents: c89cf6d
Author: Bharathkrishna Guruvayoor Murali 
Authored: Mon Jun 18 10:12:10 2018 -0500
Committer: Sahil Takiar 
Committed: Mon Jun 18 10:12:10 2018 -0500

--
 .../java/org/apache/hive/spark/client/SparkSubmitSparkClient.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8c076762/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
--
diff --git 
a/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
 
b/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
index 1a524b9..31e89b8 100644
--- 
a/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
+++ 
b/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
@@ -211,6 +211,8 @@ class SparkSubmitSparkClient extends AbstractSparkClient {
   LOG.warn("Child process exited with code {}", exitCode);
   rpcServer.cancelClient(clientId,
   "Child process (spark-submit) exited before connecting back with 
error log " + errStr.toString());
+} else {
+  LOG.info("Child process (spark-submit) exited successfully.");
 }
   } catch (InterruptedException ie) {
 LOG.warn("Thread waiting on the child process (spark-submit) is 
interrupted, killing the child process.");



hive git commit: HIVE-19602: Refactor inplace progress code in Hive-on-spark progress monitor to use ProgressMonitor instance (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar, Rui Li)

2018-06-18 Thread stakiar
Repository: hive
Updated Branches:
  refs/heads/master 3a6ad2661 -> c89cf6d5d


HIVE-19602: Refactor inplace progress code in Hive-on-spark progress monitor to 
use ProgressMonitor instance (Bharathkrishna Guruvayoor Murali, reviewed by 
Sahil Takiar, Rui Li)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c89cf6d5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c89cf6d5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c89cf6d5

Branch: refs/heads/master
Commit: c89cf6d5de0343493dc629a0073b5c8e88359a6e
Parents: 3a6ad26
Author: Bharathkrishna Guruvayoor Murali 
Authored: Mon Jun 18 10:03:01 2018 -0500
Committer: Sahil Takiar 
Committed: Mon Jun 18 10:03:01 2018 -0500

--
 .../ql/exec/spark/status/SparkJobMonitor.java   | 166 +--
 .../exec/spark/status/SparkProgressMonitor.java | 155 +
 2 files changed, 160 insertions(+), 161 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/c89cf6d5/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
index e78b1cd..3531ac2 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
@@ -22,13 +22,9 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.common.log.InPlaceUpdate;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.fusesource.jansi.Ansi;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.PrintStream;
-import java.text.DecimalFormat;
-import java.text.NumberFormat;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.HashSet;
@@ -38,8 +34,6 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
 
-import static org.fusesource.jansi.Ansi.ansi;
-
 abstract class SparkJobMonitor {
 
   protected static final String CLASS_NAME = SparkJobMonitor.class.getName();
@@ -48,6 +42,7 @@ abstract class SparkJobMonitor {
   protected final PerfLogger perfLogger = SessionState.getPerfLogger();
   protected final int checkInterval = 1000;
   protected final long monitorTimeoutInterval;
+  private final InPlaceUpdate inPlaceUpdateFn;
 
   private final Set completed = new HashSet();
   private final int printInterval = 3000;
@@ -61,94 +56,20 @@ abstract class SparkJobMonitor {
 FINISHED
   }
 
-  // in-place progress update related variables
   protected final boolean inPlaceUpdate;
-  private int lines = 0;
-  private final PrintStream out;
-
-  private static final int COLUMN_1_WIDTH = 16;
-  private static final String HEADER_FORMAT = "%16s%10s %13s  %5s  %9s  %7s  
%7s  %6s  ";
-  private static final String STAGE_FORMAT = "%-16s%10s %13s  %5s  %9s  %7s  
%7s  %6s  ";
-  private static final String HEADER = String.format(HEADER_FORMAT,
-  "STAGES", "ATTEMPT", "STATUS", "TOTAL", "COMPLETED", "RUNNING", 
"PENDING", "FAILED");
-  private static final int SEPARATOR_WIDTH = 86;
-  private static final String SEPARATOR = new String(new 
char[SEPARATOR_WIDTH]).replace("\0", "-");
-  private static final String FOOTER_FORMAT = "%-15s  %-30s %-4s  %-25s";
-  private static final int progressBarChars = 30;
-
-  private final NumberFormat secondsFormat = new DecimalFormat("#0.00");
 
   protected SparkJobMonitor(HiveConf hiveConf) {
 monitorTimeoutInterval = hiveConf.getTimeVar(
 HiveConf.ConfVars.SPARK_JOB_MONITOR_TIMEOUT, TimeUnit.SECONDS);
 inPlaceUpdate = InPlaceUpdate.canRenderInPlace(hiveConf) && 
!SessionState.getConsole().getIsSilent();
 console = new SessionState.LogHelper(LOG);
-out = SessionState.LogHelper.getInfoStream();
+inPlaceUpdateFn = new 
InPlaceUpdate(SessionState.LogHelper.getInfoStream());
   }
 
   public abstract int startMonitor();
 
   private void printStatusInPlace(Map 
progressMap) {
-
-StringBuilder reportBuffer = new StringBuilder();
-
-// Num of total and completed tasks
-int sumTotal = 0;
-int sumComplete = 0;
-
-// position the cursor to line 0
-repositionCursor();
-
-// header
-reprintLine(SEPARATOR);
-reprintLineWithColorAsBold(HEADER, Ansi.Color.CYAN);
-reprintLine(SEPARATOR);
-
-SortedSet keys = new TreeSet(progressMap.keySet());
-int idx = 0;
-final int numKey = keys.size();
-for (SparkStage stage : keys) {
-  SparkStageProgress progress = progressMap.get(stage);
-  final int complete = progress.getSucceededTaskCount();
-  

hive git commit: HIVE-19923: Follow up of HIVE-19615, use UnaryFunction instead of prefix (Slim Bouguerra, reviewed by Nishant Bangarwa, Ashutosh Chauhan)

2018-06-18 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/branch-3 9d80c2d6a -> a2c08792a


HIVE-19923: Follow up of HIVE-19615, use UnaryFunction instead of prefix (Slim 
Bouguerra, reviewed by Nishant Bangarwa, Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a2c08792
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a2c08792
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a2c08792

Branch: refs/heads/branch-3
Commit: a2c08792a6ee44adc09f8392ad24f5f3a791b696
Parents: 9d80c2d
Author: Slim Bouguerra 
Authored: Mon Jun 18 07:54:44 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Mon Jun 18 07:55:14 2018 -0700

--
 .../ql/parse/DruidSqlOperatorConverter.java | 35 ++--
 .../clientpositive/druid/druidmini_test1.q.out  |  2 +-
 2 files changed, 34 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a2c08792/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
index 4db0714..6aa98c0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.parse;
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Maps;
 import org.apache.calcite.adapter.druid.DirectOperatorConversion;
 import org.apache.calcite.adapter.druid.DruidExpressions;
@@ -51,6 +52,7 @@ import org.joda.time.Period;
 
 import javax.annotation.Nullable;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.TimeZone;
 
@@ -87,9 +89,9 @@ public class DruidSqlOperatorConverter {
   druidOperatorMap
   .put(SqlStdOperatorTable.SUBSTRING, new 
DruidSqlOperatorConverter.DruidSubstringOperatorConversion());
   druidOperatorMap
-  .put(SqlStdOperatorTable.IS_NULL, new 
UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NULL, "isnull"));
+  .put(SqlStdOperatorTable.IS_NULL, new 
UnaryFunctionOperatorConversion(SqlStdOperatorTable.IS_NULL, "isnull"));
   druidOperatorMap.put(SqlStdOperatorTable.IS_NOT_NULL,
-  new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NOT_NULL, 
"notnull")
+  new UnaryFunctionOperatorConversion(SqlStdOperatorTable.IS_NOT_NULL, 
"notnull")
   );
   druidOperatorMap.put(HiveTruncSqlOperator.INSTANCE, new 
DruidDateTruncOperatorConversion());
   druidOperatorMap.put(HiveToDateSqlOperator.INSTANCE, new 
DruidToDateOperatorConversion());
@@ -346,4 +348,33 @@ public class DruidSqlOperatorConverter {
 );
   }
 
+  public static class UnaryFunctionOperatorConversion implements 
org.apache.calcite.adapter.druid.DruidSqlOperatorConverter {
+
+private final SqlOperator operator;
+private final String druidOperator;
+
+public UnaryFunctionOperatorConversion(SqlOperator operator, String 
druidOperator) {
+  this.operator = operator;
+  this.druidOperator = druidOperator;
+}
+
+@Override public SqlOperator calciteOperator() {
+  return operator;
+}
+
+@Override public String toDruidExpression(RexNode rexNode, RelDataType 
rowType,
+DruidQuery druidQuery) {
+  final RexCall call = (RexCall) rexNode;
+
+  final List druidExpressions = 
DruidExpressions.toDruidExpressions(
+  druidQuery, rowType,
+  call.getOperands());
+
+  if (druidExpressions == null) {
+return null;
+  }
+
+  return DruidQuery.format("%s(%s)", druidOperator, 
Iterables.getOnlyElement(druidExpressions));
+}
+  }
 }
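
The effect of UnaryFunctionOperatorConversion above is that a one-argument Hive predicate 
is rendered as a Druid function call, e.g. IS NOT NULL becomes notnull(...) rather than 
the old postfix form. A tiny sketch of the formatting step follows; the operator names 
and the "%s(%s)" format string come from the patch, while the rendering of the operand 
("c1") is assumed here for illustration only.

    // Editorial sketch; the operand expression is an assumption, not output of the patch.
    public final class UnaryFunctionFormatSketch {
      static String toDruidExpression(String druidOperator, String operandExpression) {
        // Mirrors DruidQuery.format("%s(%s)", druidOperator, onlyOperand) in the patch.
        return String.format("%s(%s)", druidOperator, operandExpression);
      }

      public static void main(String[] args) {
        System.out.println(toDruidExpression("notnull", "\"c1\""));  // c1 IS NOT NULL -> notnull("c1")
        System.out.println(toDruidExpression("isnull", "\"c1\""));   // c1 IS NULL     -> isnull("c1")
      }
    }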

http://git-wip-us.apache.org/repos/asf/hive/blob/a2c08792/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
--
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out 
b/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
index 89da36a..4e078aa 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
@@ -814,7 +814,7 @@ STAGE PLANS:
   properties:
 druid.fieldNames vc,vc0
 druid.fieldTypes boolean,boolean
-druid.query.json 

hive git commit: HIVE-19923: Follow up of HIVE-19615, use UnaryFunction instead of prefix (Slim Bouguerra, reviewed by Nishant Bangarwa, Ashutosh Chauhan)

2018-06-18 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 6a16a71ce -> 3a6ad2661


HIVE-19923: Follow up of HIVE-19615, use UnaryFunction instead of prefix (Slim 
Bouguerra, reviewed by Nishant Bangarwa, Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3a6ad266
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3a6ad266
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3a6ad266

Branch: refs/heads/master
Commit: 3a6ad2661e5fdd3e6ce8b8f7ee5a35ddb3bd2c47
Parents: 6a16a71
Author: Slim Bouguerra 
Authored: Mon Jun 18 07:54:44 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Mon Jun 18 07:54:44 2018 -0700

--
 .../ql/parse/DruidSqlOperatorConverter.java | 35 ++--
 .../clientpositive/druid/druidmini_test1.q.out  |  2 +-
 2 files changed, 34 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3a6ad266/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
index 4db0714..6aa98c0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.parse;
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Maps;
 import org.apache.calcite.adapter.druid.DirectOperatorConversion;
 import org.apache.calcite.adapter.druid.DruidExpressions;
@@ -51,6 +52,7 @@ import org.joda.time.Period;
 
 import javax.annotation.Nullable;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.TimeZone;
 
@@ -87,9 +89,9 @@ public class DruidSqlOperatorConverter {
   druidOperatorMap
   .put(SqlStdOperatorTable.SUBSTRING, new 
DruidSqlOperatorConverter.DruidSubstringOperatorConversion());
   druidOperatorMap
-  .put(SqlStdOperatorTable.IS_NULL, new 
UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NULL, "isnull"));
+  .put(SqlStdOperatorTable.IS_NULL, new 
UnaryFunctionOperatorConversion(SqlStdOperatorTable.IS_NULL, "isnull"));
   druidOperatorMap.put(SqlStdOperatorTable.IS_NOT_NULL,
-  new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NOT_NULL, 
"notnull")
+  new UnaryFunctionOperatorConversion(SqlStdOperatorTable.IS_NOT_NULL, 
"notnull")
   );
   druidOperatorMap.put(HiveTruncSqlOperator.INSTANCE, new 
DruidDateTruncOperatorConversion());
   druidOperatorMap.put(HiveToDateSqlOperator.INSTANCE, new 
DruidToDateOperatorConversion());
@@ -346,4 +348,33 @@ public class DruidSqlOperatorConverter {
 );
   }
 
+  public static class UnaryFunctionOperatorConversion implements 
org.apache.calcite.adapter.druid.DruidSqlOperatorConverter {
+
+private final SqlOperator operator;
+private final String druidOperator;
+
+public UnaryFunctionOperatorConversion(SqlOperator operator, String 
druidOperator) {
+  this.operator = operator;
+  this.druidOperator = druidOperator;
+}
+
+@Override public SqlOperator calciteOperator() {
+  return operator;
+}
+
+@Override public String toDruidExpression(RexNode rexNode, RelDataType 
rowType,
+DruidQuery druidQuery) {
+  final RexCall call = (RexCall) rexNode;
+
+  final List<String> druidExpressions = 
DruidExpressions.toDruidExpressions(
+  druidQuery, rowType,
+  call.getOperands());
+
+  if (druidExpressions == null) {
+return null;
+  }
+
+  return DruidQuery.format("%s(%s)", druidOperator, 
Iterables.getOnlyElement(druidExpressions));
+}
+  }
 }

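The visible effect of replacing UnarySuffixOperatorConversion with UnaryFunctionOperatorConversion is the shape of the Druid expression Hive emits for IS NULL / IS NOT NULL: a function call such as isnull(expr) rather than a postfix spelling. Druid's expression language expects the function form, which is presumably why the conversion was switched. Below is a minimal, self-contained sketch of the two rendering styles; it uses plain Java with String.format, carries no Calcite or Druid dependencies, and the column name is illustrative only.

// Illustrative only: mimics the two expression-rendering styles without any Calcite
// or Druid dependencies.
public class UnaryNullRenderingDemo {

  // Roughly what the old suffix-style conversion produced: operand first, operator after.
  static String renderSuffix(String druidOperator, String operandExpression) {
    return String.format("(%s %s)", operandExpression, druidOperator);
  }

  // The function-style rendering added by this commit: mirrors the
  // DruidQuery.format("%s(%s)", druidOperator, operand) call in the new class.
  static String renderFunction(String druidOperator, String operandExpression) {
    return String.format("%s(%s)", druidOperator, operandExpression);
  }

  public static void main(String[] args) {
    String operand = "\"page\""; // hypothetical Druid column reference
    System.out.println(renderSuffix("isnull", operand));    // ("page" isnull)
    System.out.println(renderFunction("isnull", operand));  // isnull("page")
    System.out.println(renderFunction("notnull", operand)); // notnull("page")
  }
}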
http://git-wip-us.apache.org/repos/asf/hive/blob/3a6ad266/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
--
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out 
b/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
index 89da36a..4e078aa 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
@@ -814,7 +814,7 @@ STAGE PLANS:
   properties:
 druid.fieldNames vc,vc0
 druid.fieldTypes boolean,boolean
-druid.query.json 

hive git commit: HIVE-19725: Add ability to dump non-native tables in replication metadata dump (Mahesh Kumar Behera, reviewed by Sankar Hariappan)

2018-06-18 Thread sankarh
Repository: hive
Updated Branches:
  refs/heads/master 4ec256c23 -> 6a16a71ce


HIVE-19725: Add ability to dump non-native tables in replication metadata dump 
(Mahesh Kumar Behera, reviewed by Sankar Hariappan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6a16a71c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6a16a71c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6a16a71c

Branch: refs/heads/master
Commit: 6a16a71ce99ff5d2f7bfa69cfcb475d4adc9873f
Parents: 4ec256c
Author: Sankar Hariappan 
Authored: Mon Jun 18 06:23:41 2018 -0700
Committer: Sankar Hariappan 
Committed: Mon Jun 18 06:23:41 2018 -0700

--
 .../hadoop/hive/ql/parse/TestExportImport.java  | 44 +++-
 ...TestReplicationScenariosAcrossInstances.java | 29 -
 .../hadoop/hive/ql/parse/repl/dump/Utils.java   |  3 +-
 3 files changed, 73 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/6a16a71c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
index 67b74c2..53d13d8 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
@@ -30,9 +30,12 @@ import org.junit.Test;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import java.io.IOException;
 import java.util.HashMap;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 public class TestExportImport {
 
@@ -122,4 +125,43 @@ public class TestExportImport {
 .verifyResults(new String[] { "1", "2" });
 
   }
+
+  @Test
+  public void testExportNonNativeTable() throws Throwable {
+String path = "hdfs:///tmp/" + dbName + "/";
+String exportPath = path + "1/";
+String exportMetaPath = exportPath + "/Meta";
+String tableName =  testName.getMethodName();
+String createTableQuery =
+"CREATE TABLE " + tableName + " ( serde_id bigint COMMENT 'from 
deserializer', name string "
++ "COMMENT 'from deserializer', slib string COMMENT 'from 
deserializer') "
++ "ROW FORMAT SERDE 
'org.apache.hive.storage.jdbc.JdbcSerDe' "
++ "STORED BY 
'org.apache.hive.storage.jdbc.JdbcStorageHandler' "
++ "WITH SERDEPROPERTIES ('serialization.format'='1') "
++ "TBLPROPERTIES ( "
++ "'hive.sql.database.type'='METASTORE', "
++ "'hive.sql.query'='SELECT \"SERDE_ID\", \"NAME\", 
\"SLIB\" FROM \"SERDES\"')";
+
+srcHiveWarehouse.run("use " + dbName)
+.run(createTableQuery)
+.runFailure("export table " + tableName + " to '" + exportPath + 
"'")
+.run("export table " + tableName + " to '" + exportMetaPath + "'" 
+ " for metadata replication('1')");
+
+destHiveWarehouse.run("use " + replDbName)
+.runFailure("import table " +  tableName + " from '" + exportPath 
+ "'")
+.run("show tables")
+.verifyFailure(new String[] {tableName})
+.run("import table " + tableName + " from '" + exportMetaPath + 
"'")
+.run("show tables")
+.verifyResult(tableName);
+
+// check physical path
+Path checkPath = new Path(exportPath);
+checkPath = new Path(checkPath, EximUtil.DATA_PATH_NAME);
+FileSystem fs = checkPath.getFileSystem(srcHiveWarehouse.hiveConf);
+assertFalse(fs.exists(checkPath));
+checkPath = new Path(exportMetaPath);
+checkPath = new Path(checkPath, EximUtil.METADATA_NAME);
+assertTrue(fs.exists(checkPath));
+  }
 }

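The new test above pins down the intended behaviour for storage-handler (non-native) tables: a plain EXPORT TABLE of such a table must fail, while an EXPORT ... FOR metadata replication must succeed and write only the _metadata file, never a data directory. The following is a hypothetical sketch of that rule in isolation; it is not the actual change in repl/dump/Utils.java (whose diff is not shown here), and the class and field names are invented for illustration.

// Hypothetical sketch only -- not the actual Hive code. It illustrates the rule the
// new test exercises: a non-native (storage-handler backed) table may be dumped when
// only metadata is requested, but a plain data export of it must be rejected.
public final class NonNativeExportRule {

  /** Minimal stand-in for the table properties the real code would consult. */
  public static final class TableInfo {
    final String name;
    final boolean nonNative; // true when the table is backed by a storage handler
    TableInfo(String name, boolean nonNative) {
      this.name = name;
      this.nonNative = nonNative;
    }
  }

  /** Returns true if the export may proceed, false if it must be rejected. */
  static boolean allowExport(TableInfo table, boolean metadataOnly) {
    // Native tables can always be exported; non-native tables only as metadata.
    return !table.nonNative || metadataOnly;
  }

  public static void main(String[] args) {
    TableInfo jdbcTable = new TableInfo("serde_backed", true); // hypothetical name
    System.out.println(allowExport(jdbcTable, false)); // false -> plain EXPORT fails
    System.out.println(allowExport(jdbcTable, true));  // true  -> metadata-only dump OK
  }
}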
http://git-wip-us.apache.org/repos/asf/hive/blob/6a16a71c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
index 26e308c..0f67174 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
@@ -797,7 +797,7 @@ public 

[2/2] hive git commit: HIVE-19909: qtests: retire hadoop_major version specific tests; and logics (Zoltan Haindrich reviewed by Teddy Choi)

2018-06-18 Thread kgyrtkirk
HIVE-19909: qtests: retire hadoop_major version specific tests; and logics 
(Zoltan Haindrich reviewed by Teddy Choi)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4ec256c2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4ec256c2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4ec256c2

Branch: refs/heads/master
Commit: 4ec256c23d5986385f0ad4ff0ae43b72822b6756
Parents: ebd2c5f
Author: Zoltan Haindrich 
Authored: Mon Jun 18 10:35:12 2018 +0200
Committer: Zoltan Haindrich 
Committed: Mon Jun 18 10:35:12 2018 +0200

--
 .../src/test/queries/negative/cascade_dbdrop.q  |   1 -
 .../queries/negative/cascade_dbdrop_hadoop20.q  |  29 --
 .../control/AbstractCoreBlobstoreCliDriver.java |   7 -
 .../hive/cli/control/CoreAccumuloCliDriver.java |   5 -
 .../hadoop/hive/cli/control/CoreCliDriver.java  |   8 -
 .../hive/cli/control/CoreCompareCliDriver.java  |   7 +-
 .../hive/cli/control/CoreHBaseCliDriver.java|   5 -
 .../cli/control/CoreHBaseNegativeCliDriver.java |   5 -
 .../hive/cli/control/CoreNegativeCliDriver.java |   7 +-
 .../hive/cli/control/CorePerfCliDriver.java |  10 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java| 110 +--
 ql/src/test/queries/clientnegative/archive1.q   |   1 -
 ql/src/test/queries/clientnegative/archive2.q   |   1 -
 ql/src/test/queries/clientnegative/archive3.q   |   1 -
 ql/src/test/queries/clientnegative/archive4.q   |   1 -
 .../queries/clientnegative/archive_corrupt.q|   1 -
 .../queries/clientnegative/archive_insert1.q|   1 -
 .../queries/clientnegative/archive_insert2.q|   1 -
 .../queries/clientnegative/archive_insert3.q|   1 -
 .../queries/clientnegative/archive_insert4.q|   1 -
 .../queries/clientnegative/archive_multi1.q |   1 -
 .../queries/clientnegative/archive_multi2.q |   1 -
 .../queries/clientnegative/archive_multi3.q |   1 -
 .../queries/clientnegative/archive_multi4.q |   1 -
 .../queries/clientnegative/archive_multi5.q |   1 -
 .../queries/clientnegative/archive_multi6.q |   1 -
 .../queries/clientnegative/archive_multi7.q |   1 -
 .../queries/clientnegative/archive_partspec1.q  |   1 -
 .../queries/clientnegative/archive_partspec2.q  |   1 -
 .../queries/clientnegative/archive_partspec3.q  |   1 -
 .../queries/clientnegative/archive_partspec4.q  |   1 -
 .../queries/clientnegative/archive_partspec5.q  |   1 -
 ql/src/test/queries/clientnegative/autolocal1.q |  16 --
 .../clientnegative/mapreduce_stack_trace.q  |   1 -
 .../mapreduce_stack_trace_turnoff.q |   1 -
 .../alter_numbuckets_partitioned_table_h23.q|   1 -
 .../test/queries/clientpositive/archive_multi.q |   1 -
 .../test/queries/clientpositive/auto_join14.q   |   1 -
 .../clientpositive/auto_join14_hadoop20.q   |  20 --
 .../cbo_rp_udaf_percentile_approx_23.q  |   1 -
 ql/src/test/queries/clientpositive/combine2.q   |   1 -
 .../queries/clientpositive/combine2_hadoop20.q  |  50 
 ql/src/test/queries/clientpositive/ctas.q   |   1 -
 .../queries/clientpositive/groupby_sort_1.q | 283 --
 .../queries/clientpositive/groupby_sort_1_23.q  |   1 -
 .../clientpositive/groupby_sort_skew_1.q| 285 ---
 .../clientpositive/groupby_sort_skew_1_23.q |   1 -
 .../infer_bucket_sort_list_bucket.q |   1 -
 ql/src/test/queries/clientpositive/input12.q|   1 -
 .../queries/clientpositive/input12_hadoop20.q   |  24 --
 ql/src/test/queries/clientpositive/input39.q|   1 -
 .../queries/clientpositive/input39_hadoop20.q   |  31 --
 ql/src/test/queries/clientpositive/join14.q |   1 -
 .../queries/clientpositive/join14_hadoop20.q|  17 --
 .../test/queries/clientpositive/lb_fs_stats.q   |   1 -
 .../queries/clientpositive/list_bucket_dml_1.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_11.q |   1 -
 .../queries/clientpositive/list_bucket_dml_12.q |   1 -
 .../queries/clientpositive/list_bucket_dml_13.q |   1 -
 .../queries/clientpositive/list_bucket_dml_14.q |   1 -
 .../queries/clientpositive/list_bucket_dml_2.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_3.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_4.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_5.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_6.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_7.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_8.q  |   1 -
 .../queries/clientpositive/list_bucket_dml_9.q  |   1 -
 .../list_bucket_query_multiskew_1.q |   1 -
 .../list_bucket_query_multiskew_2.q |   1 -
 .../list_bucket_query_multiskew_3.q |   1 -
 .../list_bucket_query_oneskew_1.q   |   1 -
 .../list_bucket_query_oneskew_2.q   |   1 -
 .../list_bucket_query_oneskew_3.q   |   1 -
 

[1/2] hive git commit: HIVE-19909: qtests: retire hadoop_major version specific tests; and logics (Zoltan Haindrich reviewed by Teddy Choi)

2018-06-18 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master ebd2c5f8a -> 4ec256c23


http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
--
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
index a5f5522..f933545 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
 set mapred.input.dir.recursive=true;   
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) 
 -- SORT_QUERY_RESULTS
 
 -- List bucketing query logic test case. We simulate the directory structure 
by DML here.

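The q-file hunks in this change all drop the same kind of marker as the one above: a comment directive naming the Hadoop major versions a test was allowed to run on, which the q-test drivers evaluated before that logic was retired. A hypothetical sketch of how such a directive can be matched follows; it is illustrative only and is not the retired QTestUtil code.

// Hypothetical sketch only -- not the retired QTestUtil logic. It shows the kind of
// directive matching that this change removes from the q-file drivers.
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HadoopVersionDirectiveDemo {

  // Matches e.g. "-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)" and captures the version list.
  private static final Pattern INCLUDE =
      Pattern.compile("--\\s*INCLUDE_HADOOP_MAJOR_VERSIONS\\(([^)]*)\\)");

  /** Returns true when the q-file line allows the given Hadoop major version. */
  static boolean includesVersion(String qFileLine, String hadoopMajorVersion) {
    Matcher m = INCLUDE.matcher(qFileLine);
    if (!m.find()) {
      return true; // no directive -> the test runs on every version
    }
    List<String> versions = Arrays.asList(m.group(1).split("\\s*,\\s*"));
    return versions.contains(hadoopMajorVersion);
  }

  public static void main(String[] args) {
    String line = "-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)";
    System.out.println(includesVersion(line, "0.23"));  // true
    System.out.println(includesVersion(line, "0.20S")); // false
  }
}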
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
--
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
index 4020063..d5f6a26 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
 set mapred.input.dir.recursive=true;   
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) 
 
 -- List bucketing query logic test case. We simulate the directory structure 
by DML here.
 -- Test condition: 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
--
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
index 54ab75e..fc5815c 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
 set mapred.input.dir.recursive=true;   
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) 
 -- SORT_QUERY_RESULTS
 
 -- List bucketing query logic test case. 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
--
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
index 77974cf..bc4f96c 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
 set mapred.input.dir.recursive=true;   
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) 
 -- SORT_QUERY_RESULTS
 
 -- List bucketing query logic test case. 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
--
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
index bf6b227..64193f1 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
 set mapred.input.dir.recursive=true;   
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) 
 -- SORT_QUERY_RESULTS
 
 -- List bucketing query logic test case. 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/loadpart_err.q
--
diff --git a/ql/src/test/queries/clientpositive/loadpart_err.q 
b/ql/src/test/queries/clientpositive/loadpart_err.q
deleted file mode 100644
index 1204622..000
--- a/ql/src/test/queries/clientpositive/loadpart_err.q
+++ /dev/null
@@ -1,21 +0,0 @@
---! qt:dataset:src
-set hive.cli.errors.ignore=true;
-
-ADD FILE ../../data/scripts/error_script;
-
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S, 0.23)
--- (this test is flaky so it is currently disabled for all Hadoop versions)
-
-CREATE TABLE loadpart1(a STRING, b STRING) PARTITIONED BY (ds STRING);
-
-INSERT OVERWRITE TABLE loadpart1 PARTITION (ds='2009-01-01')
-SELECT TRANSFORM(src.key, src.value) USING 'error_script' AS