[31/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/cbo_join.q.out
--
diff --git a/ql/src/test/results/clientpositive/cbo_join.q.out 
b/ql/src/test/results/clientpositive/cbo_join.q.out
deleted file mode 100644
index c5e9858..000
--- a/ql/src/test/results/clientpositive/cbo_join.q.out
+++ /dev/null
@@ -1,15028 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- 4. Test Select + Join + TS
-select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on 
cbo_t1.key=cbo_t2.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t2
-PREHOOK: Input: default@cbo_t2@dt=2014
- A masked pattern was here 
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- 4. Test Select + Join + TS
-select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on 
cbo_t1.key=cbo_t2.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
-POSTHOOK: Input: default@cbo_t2
-POSTHOOK: Input: default@cbo_t2@dt=2014
- A masked pattern was here 
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-PREHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t3
- A masked pattern was here 
-POSTHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
-POSTHOOK: Input: default@cbo_t3
- A masked pattern was here 
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-PREHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 where 
cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t3
- A masked pattern was here 
-POSTHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 where 
cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
-POSTHOOK: Input: default@cbo_t3
- A masked pattern was here 
- 1
- 1
- 1
- 1
- 1 
- 1 
- 1 
- 1 
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1 
-1 
-1 
-1 
-PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join  
cbo_t2 on cbo_t1.key=cbo_t2.key
-PR

[53/54] hive git commit: HIVE-17338: Utilities.get*Tasks multiple methods duplicate code (Gergely Hajós via Zoltan Haindrich)

2017-09-21 Thread kgyrtkirk
HIVE-17338: Utilities.get*Tasks multiple methods duplicate code (Gergely Hajós 
via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cb770077
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cb770077
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cb770077

Branch: refs/heads/master
Commit: cb770077d47931ff33a5f272982133b03f8d2a75
Parents: 229a7cc
Author: Gergely Hajós 
Authored: Thu Sep 21 10:23:52 2017 +0200
Committer: Zoltan Haindrich 
Committed: Thu Sep 21 10:23:52 2017 +0200

--
 .../apache/hadoop/hive/ql/exec/Utilities.java   | 106 +--
 .../hadoop/hive/ql/exec/TestUtilities.java  |  22 
 2 files changed, 46 insertions(+), 82 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/cb770077/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 4322cc6..ae70cba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2444,99 +2444,41 @@ public final class Utilities {
   }
 
   public static List<TezTask> getTezTasks(List<Task<? extends Serializable>> tasks) {
-    List<TezTask> tezTasks = new ArrayList<TezTask>();
-    if (tasks != null) {
-      Set<Task<? extends Serializable>> visited = new HashSet<Task<? extends Serializable>>();
-      while (!tasks.isEmpty()) {
-        tasks = getTezTasks(tasks, tezTasks, visited);
-      }
-    }
-    return tezTasks;
-  }
-
-  private static List<Task<? extends Serializable>> getTezTasks(
-      List<Task<? extends Serializable>> tasks,
-      List<TezTask> tezTasks,
-      Set<Task<? extends Serializable>> visited) {
-    List<Task<? extends Serializable>> childTasks = new ArrayList<>();
-    for (Task<? extends Serializable> task : tasks) {
-      if (visited.contains(task)) {
-        continue;
-      }
-      if (task instanceof TezTask && !tezTasks.contains(task)) {
-        tezTasks.add((TezTask) task);
-      }
-
-      if (task.getDependentTasks() != null) {
-        childTasks.addAll(task.getDependentTasks());
-      }
-      visited.add(task);
-    }
-    return childTasks;
+    return getTasks(tasks, TezTask.class);
   }
 
   public static List<SparkTask> getSparkTasks(List<Task<? extends Serializable>> tasks) {
-    List<SparkTask> sparkTasks = new ArrayList<SparkTask>();
-    if (tasks != null) {
-      Set<Task<? extends Serializable>> visited = new HashSet<Task<? extends Serializable>>();
-      while (!tasks.isEmpty()) {
-        tasks = getSparkTasks(tasks, sparkTasks, visited);
-      }
-    }
-    return sparkTasks;
+    return getTasks(tasks, SparkTask.class);
   }
 
-  private static List<Task<? extends Serializable>> getSparkTasks(
-      List<Task<? extends Serializable>> tasks,
-      List<SparkTask> sparkTasks,
-      Set<Task<? extends Serializable>> visited) {
-    List<Task<? extends Serializable>> childTasks = new ArrayList<>();
-    for (Task<? extends Serializable> task : tasks) {
-      if (visited.contains(task)) {
-        continue;
-      }
-      if (task instanceof SparkTask && !sparkTasks.contains(task)) {
-        sparkTasks.add((SparkTask) task);
-      }
-
-      if (task.getDependentTasks() != null) {
-        childTasks.addAll(task.getDependentTasks());
-      }
-      visited.add(task);
-    }
-    return childTasks;
+  public static List<ExecDriver> getMRTasks(List<Task<? extends Serializable>> tasks) {
+    return getTasks(tasks, ExecDriver.class);
   }
 
-  public static List<ExecDriver> getMRTasks(List<Task<? extends Serializable>> tasks) {
-    List<ExecDriver> mrTasks = new ArrayList<ExecDriver>();
+  @SuppressWarnings("unchecked")
+  public static <T> List<T> getTasks(List<Task<? extends Serializable>> tasks, Class<T> requiredType) {
+    List<T> typeSpecificTasks = new ArrayList<>();
     if (tasks != null) {
-      Set<Task<? extends Serializable>> visited = new HashSet<Task<? extends Serializable>>();
+      Set<Task<? extends Serializable>> visited = new HashSet<>();
       while (!tasks.isEmpty()) {
-        tasks = getMRTasks(tasks, mrTasks, visited);
-      }
-    }
-    return mrTasks;
-  }
-
-  private static List<Task<? extends Serializable>> getMRTasks(
-      List<Task<? extends Serializable>> tasks,
-      List<ExecDriver> mrTasks,
-      Set<Task<? extends Serializable>> visited) {
-    List<Task<? extends Serializable>> childTasks = new ArrayList<>();
-    for (Task<? extends Serializable> task : tasks) {
-      if (visited.contains(task)) {
-        continue;
-      }
-      if (task instanceof ExecDriver && !mrTasks.contains(task)) {
-        mrTasks.add((ExecDriver) task);
-      }
-
-      if (task.getDependentTasks() != null) {
-        childTasks.addAll(task.getDependentTasks());
+        List<Task<? extends Serializable>> childTasks = new ArrayList<>();
+        for (Task<? extends Serializable> task : tasks) {
+          if (visited.contains(task)) {
+            continue;
+          }
+          if (requiredType.isInstance(task) && !typeSpecificTasks.contains(task)) {
+            typeSpecificTasks.add((T) task);
+          }
+          if (task.getDependentTasks() != null) {
+            childTasks.addAll(task.getDependentTasks());
+          }
+          visited.add(task);
+        }
+        // start recursion
+        tasks = childTasks;
       }
-      visited.add(task);
     }
-    return childTasks;
+    return typeSpecificTasks;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/cb770077/q
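For readers skimming the hunk above: the patch collapses three nearly identical breadth-first walks over the task DAG (Tez, Spark, MR) into a single generic getTasks(tasks, requiredType) helper, so the three public entry points become one-line delegations. The standalone sketch below illustrates the same pattern outside of Hive; SimpleTask, TezLikeTask, TaskCollector and collectTasks are hypothetical names invented for this example and are not part of the Hive codebase.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Hypothetical stand-in for Hive's Task<? extends Serializable> DAG node.
class SimpleTask {
  private final List<SimpleTask> children = new ArrayList<>();

  List<SimpleTask> getDependentTasks() {
    return children;
  }

  SimpleTask addChild(SimpleTask child) {
    children.add(child);
    return child;
  }
}

// Hypothetical subtype, standing in for TezTask / SparkTask / ExecDriver.
class TezLikeTask extends SimpleTask {
}

public class TaskCollector {

  // Same shape as the unified Utilities.getTasks: walk the DAG level by level,
  // skip nodes already seen through another parent, and keep only tasks of the
  // requested type.
  static <T> List<T> collectTasks(List<SimpleTask> roots, Class<T> requiredType) {
    List<T> found = new ArrayList<>();
    Set<SimpleTask> visited = new HashSet<>();
    List<SimpleTask> current = new ArrayList<>(roots);
    while (!current.isEmpty()) {
      List<SimpleTask> next = new ArrayList<>();
      for (SimpleTask task : current) {
        if (!visited.add(task)) {
          continue; // already visited via another path
        }
        if (requiredType.isInstance(task)) {
          found.add(requiredType.cast(task));
        }
        if (task.getDependentTasks() != null) {
          next.addAll(task.getDependentTasks());
        }
      }
      current = next; // descend one level, mirroring "tasks = childTasks"
    }
    return found;
  }

  public static void main(String[] args) {
    SimpleTask root = new SimpleTask();
    root.addChild(new TezLikeTask()).addChild(new SimpleTask());
    root.addChild(new TezLikeTask());
    // prints 2: both TezLikeTask nodes are found, the plain tasks are skipped
    System.out.println(collectTasks(List.of(root), TezLikeTask.class).size());
  }
}

Relying on Class.isInstance and a cast to the requested type, rather than per-type instanceof checks, is what lets one traversal serve all three task kinds.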

[34/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/bucketsortoptimize_insert_2.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_2.q.out 
b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_2.q.out
deleted file mode 100644
index 9761293..000
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_2.q.out
+++ /dev/null
@@ -1,1244 +0,0 @@
-PREHOOK: query: -- Create two bucketed and sorted tables
-CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
-CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table1
-POSTHOOK: query: -- Create two bucketed and sorted tables
-CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
-CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table1
-PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED 
BY (ds STRING)
-CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED 
BY (ds STRING)
-CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
-PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED 
BY (ds STRING)
-CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table3
-POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED 
BY (ds STRING)
-CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table3
-PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table1@ds=1
-POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table1@ds=1
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 
100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table2@ds=1
-POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 
100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table2@ds=1
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '2') SELECT * where key < 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table1@ds=2
-POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '2') SELECT * where key < 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table1@ds=2
-POSTHOOK: Lineage: test_table1 PARTITION(ds=2).key EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table1 PARTITION(ds=2).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT * where key < 
100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table2@ds=2
-POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT * where key < 
100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table2@ds=2
-POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- Insert data into the bucketed table by selecting from 
another bucketed table
--- This should be a map-only operation
-EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
-SELECT a.key, concat(a.value, b.value) 
-FROM test_

[52/54] hive git commit: HIVE-17541: Move testing related methods from MetaStoreUtils to some testing related utility (Zoltan Haindrich, reviewed by Alan Gates)

2017-09-21 Thread kgyrtkirk
HIVE-17541: Move testing related methods from MetaStoreUtils to some testing 
related utility (Zoltan Haindrich, reviewed by Alan Gates)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/229a7cc8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/229a7cc8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/229a7cc8

Branch: refs/heads/master
Commit: 229a7cc819e2db04492591e4e30e15bb60c5a5a1
Parents: 6f5c113
Author: Zoltan Haindrich 
Authored: Thu Sep 21 10:22:12 2017 +0200
Committer: Zoltan Haindrich 
Committed: Thu Sep 21 10:22:18 2017 +0200

--
 hcatalog/core/pom.xml   |   7 +
 .../apache/hive/hcatalog/cli/TestPermsGrp.java  |   5 +-
 .../mapreduce/TestHCatMultiOutputFormat.java|   4 +-
 .../mapreduce/TestHCatPartitionPublish.java |   5 +-
 hcatalog/webhcat/java-client/pom.xml|   7 +
 .../hive/hcatalog/api/TestHCatClient.java   |   6 +-
 hcatalog/webhcat/svr/pom.xml|   7 +
 .../hive/hcatalog/templeton/TestWebHCatE2e.java |   4 +-
 .../security/TestHadoopAuthBridge23.java|   4 +-
 .../AbstractTestAuthorizationApiAuthorizer.java |   4 +-
 .../hadoop/hive/metastore/TestFilterHooks.java  |   4 +-
 ...TestHiveMetaStoreWithEnvironmentContext.java |   4 +-
 .../hive/metastore/TestMarkPartitionRemote.java |   2 +-
 .../metastore/TestMetaStoreAuthorization.java   |   2 +-
 .../TestMetaStoreEndFunctionListener.java   |   4 +-
 .../metastore/TestMetaStoreEventListener.java   |   4 +-
 .../TestMetaStoreEventListenerOnlyOnCommit.java |   4 +-
 .../metastore/TestMetaStoreInitListener.java|   4 +-
 .../metastore/TestMetaStoreListenersError.java  |   4 +-
 .../hive/metastore/TestMetaStoreMetrics.java|   4 +-
 .../hive/metastore/TestRemoteHiveMetaStore.java |   4 +-
 .../TestRemoteHiveMetaStoreIpAddress.java   |   4 +-
 .../hive/metastore/TestRetryingHMSHandler.java  |   4 +-
 ...estDDLWithRemoteMetastoreSecondNamenode.java |   6 +-
 .../hive/ql/parse/TestReplicationScenarios.java |   6 +-
 .../hadoop/hive/ql/parse/WarehouseInstance.java |   4 +-
 .../security/StorageBasedMetastoreTestBase.java |   6 +-
 .../TestAuthorizationPreEventListener.java  |   6 +-
 .../TestClientSideAuthorizationProvider.java|   6 +-
 .../TestMetastoreAuthorizationProvider.java |   6 +-
 .../TestMultiAuthorizationPreEventListener.java |   6 +-
 .../thrift/ThriftCliServiceMessageSizeTest.java |   4 +-
 itests/util/pom.xml |   6 +
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java   |  12 +-
 llap-server/pom.xml |   8 +
 .../services/impl/TestLlapWebServices.java  |   4 +-
 metastore/pom.xml   |  11 ++
 .../hadoop/hive/metastore/MetaStoreUtils.java   | 155 ++-
 .../hive/metastore/MetaStoreTestUtils.java  | 147 ++
 .../metastore/TestHiveMetaStoreGetMetaConf.java |   2 +-
 .../TestHiveMetaStorePartitionSpecs.java|   2 +-
 .../TestRetriesInRetryingHMSHandler.java|   2 +-
 ql/pom.xml  |  21 ++-
 .../TestHostAffinitySplitLocationProvider.java  |   8 +-
 .../hadoop/hive/ql/metadata/TestHiveRemote.java |   4 +-
 service/pom.xml |   7 +
 .../cli/thrift/ThriftCLIServiceTest.java|   4 +-
 .../thrift/ThriftCliServiceTestWithCookie.java  |   4 +-
 .../hive/service/server/TestHS2HttpServer.java  |   4 +-
 49 files changed, 320 insertions(+), 232 deletions(-)
--
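Most of the per-file churn above is mechanical: test classes that previously called embedded-metastore helpers on MetaStoreUtils now call the new MetaStoreTestUtils, which ships in the metastore test-jar (hence the pom.xml dependency additions shown below). A hedged before/after sketch of such a call site follows; the helper name startMetaStoreWithRetry and its no-argument signature are assumptions made for illustration, not taken from this diff.

// Hypothetical call site illustrating the kind of small change applied to
// each affected test class. Method names and signatures are assumptions.

// Before: the test-only helper was exposed by the production utility class.
// import org.apache.hadoop.hive.metastore.MetaStoreUtils;
// msPort = MetaStoreUtils.startMetaStoreWithRetry();

// After: the helper is provided by the new test-only utility class.
import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;

public class ExampleMetaStoreTest {
  private int msPort;

  public void setUp() throws Exception {
    // assumed to start an embedded metastore on a free port and return it
    msPort = MetaStoreTestUtils.startMetaStoreWithRetry();
  }
}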


http://git-wip-us.apache.org/repos/asf/hive/blob/229a7cc8/hcatalog/core/pom.xml
--
diff --git a/hcatalog/core/pom.xml b/hcatalog/core/pom.xml
index d151374..94e9fbe 100644
--- a/hcatalog/core/pom.xml
+++ b/hcatalog/core/pom.xml
@@ -67,6 +67,13 @@
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-metastore</artifactId>
+      <version>${project.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
 
     <dependency>
       <groupId>com.google.guava</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/229a7cc8/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
--
diff --git 
a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java 
b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
index 374c1d2..80c5d63 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClien

[17/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/correlationoptimizer6.q.out
--
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer6.q.out 
b/ql/src/test/results/clientpositive/correlationoptimizer6.q.out
deleted file mode 100644
index 3ae73fc..000
--- a/ql/src/test/results/clientpositive/correlationoptimizer6.q.out
+++ /dev/null
@@ -1,3918 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
-
--- When Correlation Optimizer is turned off, 6 MR jobs are needed.
--- When Correlation Optimizer is turned on, 2 MR jobs are needed.
--- The first job will evaluate subquery xx, subquery yy, and xx join yy.
-EXPLAIN
-SELECT xx.key, xx.cnt, yy.key, yy.cnt
-FROM
-(SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = 
y.key) group by x.key) xx
-JOIN
-(SELECT x.key as key, count(1) as cnt FROM src x JOIN src y ON (x.key = y.key) 
group by x.key) yy
-ON xx.key=yy.key
-PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_QUERY_RESULTS
-
--- When Correlation Optimizer is turned off, 6 MR jobs are needed.
--- When Correlation Optimizer is turned on, 2 MR jobs are needed.
--- The first job will evaluate subquery xx, subquery yy, and xx join yy.
-EXPLAIN
-SELECT xx.key, xx.cnt, yy.key, yy.cnt
-FROM
-(SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = 
y.key) group by x.key) xx
-JOIN
-(SELECT x.key as key, count(1) as cnt FROM src x JOIN src y ON (x.key = y.key) 
group by x.key) yy
-ON xx.key=yy.key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-2, Stage-6
-  Stage-5 is a root stage
-  Stage-6 depends on stages: Stage-5
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: x
-Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: key is not null (type: boolean)
-  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: string)
-outputColumnNames: _col0
-Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 25 Data size: 191 Basic stats: 
COMPLETE Column stats: NONE
-  TableScan
-alias: y
-Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: key is not null (type: boolean)
-  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: string)
-outputColumnNames: _col0
-Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 25 Data size: 191 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Operator Tree:
-Join Operator
-  condition map:
-   Inner Join 0 to 1
-  keys:
-0 _col0 (type: string)
-1 _col0 (type: string)
-  outputColumnNames: _col0
-  Statistics: Num rows: 27 Data size: 210 Basic stats: COMPLETE Column 
stats: NONE
-  Group By Operator
-aggregations: count(1)
-keys: _col0 (type: string)
-mode: hash
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 27 Data size: 210 Basic stats: COMPLETE 
Column stats: NONE
-File Output Operator
-  compressed: false
-  table:
-  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-Map Reduce
-  Map Operator Tree:
-  TableScan
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 27 Data size: 210 Basic stats: COMPLETE 
Column stats: NONE
-  value expressions: _col1 (type: bigint)
-  Reduce Operator Tree:
-Grou

[08/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/join32_lessSize.q.out
--
diff --git a/ql/src/test/results/clientpositive/join32_lessSize.q.out 
b/ql/src/test/results/clientpositive/join32_lessSize.q.out
deleted file mode 100644
index f9f3506..000
--- a/ql/src/test/results/clientpositive/join32_lessSize.q.out
+++ /dev/null
@@ -1,2651 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
-
-CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dest_j1
-POSTHOOK: query: -- SORT_QUERY_RESULTS
-
-CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest_j1
-PREHOOK: query: CREATE TABLE dest_j2(key STRING, value STRING, val2 STRING) 
STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dest_j2
-POSTHOOK: query: CREATE TABLE dest_j2(key STRING, value STRING, val2 STRING) 
STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest_j2
-PREHOOK: query: -- Since the inputs are small, it should be automatically 
converted to mapjoin
-
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j1
-SELECT x.key, z.value, y.value
-FROM src1 x JOIN src y ON (x.key = y.key) 
-JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11)
-PREHOOK: type: QUERY
-POSTHOOK: query: -- Since the inputs are small, it should be automatically 
converted to mapjoin
-
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j1
-SELECT x.key, z.value, y.value
-FROM src1 x JOIN src y ON (x.key = y.key) 
-JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11)
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-9 is a root stage
-  Stage-7 depends on stages: Stage-9
-  Stage-8 depends on stages: Stage-7
-  Stage-6 depends on stages: Stage-8
-  Stage-0 depends on stages: Stage-6
-  Stage-3 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-9
-Map Reduce Local Work
-  Alias -> Map Local Tables:
-$hdt$_2:x 
-  Fetch Operator
-limit: -1
-  Alias -> Map Local Operator Tree:
-$hdt$_2:x 
-  TableScan
-alias: x
-Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-GatherStats: false
-Filter Operator
-  isSamplingPred: false
-  predicate: (key is not null and value is not null) (type: 
boolean)
-  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: string), value (type: string)
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-HashTable Sink Operator
-  keys:
-0 _col0 (type: string)
-1 _col1 (type: string)
-  Position of Big Table: 0
-
-  Stage: Stage-7
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: z
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-GatherStats: false
-Filter Operator
-  isSamplingPred: false
-  predicate: value is not null (type: boolean)
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: value (type: string)
-outputColumnNames: _col0
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Map Join Operator
-  condition map:
-   Inner Join 0 to 1
-  keys:
-0 _col0 (type: string)
-1 _col1 (type: string)
-  outputColumnNames: _col0, _col3
-  Position of Big Table: 0
-  Statistics: Num rows: 550 Data size: 5843 Basic stats: 
COMPLETE Column stats: NONE
-  File Output Operator
-compressed: false
-GlobalTableId: 0
- A masked pattern was here 
-NumFilesPerFileSink: 1
-table:
-input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-properties:
-  columns _col0,_col3
-  columns.types string,string
-  escape.delim \
-  serializat

[18/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/correlationoptimizer4.q.out
--
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer4.q.out 
b/ql/src/test/results/clientpositive/correlationoptimizer4.q.out
deleted file mode 100644
index 8596827..000
--- a/ql/src/test/results/clientpositive/correlationoptimizer4.q.out
+++ /dev/null
@@ -1,1862 +0,0 @@
-PREHOOK: query: CREATE TABLE T1(key INT, val STRING)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key INT, val STRING)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key INT, val STRING)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key INT, val STRING)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@t2
-PREHOOK: query: CREATE TABLE T3(key INT, val STRING)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-POSTHOOK: query: CREATE TABLE T3(key INT, val STRING)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T3
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@t3
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T3
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@t3
-PREHOOK: query: -- When Correlation Optimizer is turned off, this query will 
be evaluated
--- by 3 MR jobs. 
--- When Correlation Optimizer is turned on, this query will be evaluated by
--- 2 MR jobs. The subquery tmp will be evaluated in a single MR job.
-EXPLAIN
-SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
-FROM (SELECT y.key AS key, count(1) AS cnt
-  FROM T2 x JOIN T1 y ON (x.key = y.key) JOIN T3 z ON (y.key = z.key)
-  GROUP BY y.key) tmp
-PREHOOK: type: QUERY
-POSTHOOK: query: -- When Correlation Optimizer is turned off, this query will 
be evaluated
--- by 3 MR jobs. 
--- When Correlation Optimizer is turned on, this query will be evaluated by
--- 2 MR jobs. The subquery tmp will be evaluated in a single MR job.
-EXPLAIN
-SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
-FROM (SELECT y.key AS key, count(1) AS cnt
-  FROM T2 x JOIN T1 y ON (x.key = y.key) JOIN T3 z ON (y.key = z.key)
-  GROUP BY y.key) tmp
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: x
-Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE Column 
stats: NONE
-Filter Operator
-  predicate: key is not null (type: boolean)
-  Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: int)
-outputColumnNames: _col0
-Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE 
Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: int)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: int)
-  Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE 
Column stats: NONE
-  TableScan
-alias: y
-Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE Column 
stats: NONE
-Filter Operator
-  predicate: key is not null (type: boolean)
-  Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: int)
-outputColumnNames: _col0
-Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE 
Column stats: NONE
-Reduce Output Operator
-  

[05/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/lateral_view.q.out
--
diff --git a/ql/src/test/results/clientpositive/lateral_view.q.out 
b/ql/src/test/results/clientpositive/lateral_view.q.out
deleted file mode 100644
index 5358cc1..000
--- a/ql/src/test/results/clientpositive/lateral_view.q.out
+++ /dev/null
@@ -1,900 +0,0 @@
-PREHOOK: query: CREATE TABLE tmp_pyang_lv (inputs string) STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tmp_pyang_lv
-POSTHOOK: query: CREATE TABLE tmp_pyang_lv (inputs string) STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tmp_pyang_lv
-PREHOOK: query: INSERT OVERWRITE TABLE tmp_pyang_lv SELECT key FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tmp_pyang_lv
-POSTHOOK: query: INSERT OVERWRITE TABLE tmp_pyang_lv SELECT key FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tmp_pyang_lv
-POSTHOOK: Lineage: tmp_pyang_lv.inputs SIMPLE [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) 
myTable AS myCol SORT BY key ASC, myCol ASC LIMIT 1
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) 
myTable AS myCol SORT BY key ASC, myCol ASC LIMIT 1
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: src
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Lateral View Forward
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: string), value (type: string)
-outputColumnNames: key, value
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Lateral View Join Operator
-  outputColumnNames: _col0, _col1, _col5
-  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: _col0 (type: string), _col1 (type: string), 
_col5 (type: int)
-outputColumnNames: _col0, _col1, _col2
-Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string), _col2 (type: int)
-  sort order: ++
-  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  TopN Hash Memory Usage: 0.1
-  value expressions: _col1 (type: string)
-  Select Operator
-expressions: array(1,2,3) (type: array)
-outputColumnNames: _col0
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-UDTF Operator
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  function name: explode
-  Lateral View Join Operator
-outputColumnNames: _col0, _col1, _col5
-Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-Select Operator
-  expressions: _col0 (type: string), _col1 (type: string), 
_col5 (type: int)
-  outputColumnNames: _col0, _col1, _col2
-  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Output Operator
-key expressions: _col0 (type: string), _col2 (type: 
int)
-sort order: ++
-Statistics: Num rows: 1000 Data size: 10624 Basic 
stats: COMPLETE Column stats: NONE
-TopN Hash Memory Usage: 0.1
-value expressions: _col1 (type: string)
-  Reduce Operator Tree:
-Select Operator
-  expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: 
string), KEY.reducesinkkey1 (type: int)
-  outputColumnNames: _col0, _col1, _col2
-  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
-  Limit
-Number of rows: 1
-Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column 
stats: NONE
-File Output Operator
-

[48/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
--
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_2.q.out 
b/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
deleted file mode 100644
index 791e6ae..000
--- a/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
+++ /dev/null
@@ -1,1500 +0,0 @@
-PREHOOK: query: drop table src_multi1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table src_multi1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table src_multi1 like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_multi1
-POSTHOOK: query: create table src_multi1 like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_multi1
-PREHOOK: query: insert into table src_multi1 select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@src_multi1
-POSTHOOK: query: insert into table src_multi1 select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_multi1
-POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
-PREHOOK: query: explain extended select * from src_multi1
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select * from src_multi1
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-Fetch Operator
-  limit: -1
-  Processor Tree:
-TableScan
-  alias: src_multi1
-  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE 
Column stats: COMPLETE
-  GatherStats: false
-  Select Operator
-expressions: key (type: string), value (type: string)
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE 
Column stats: COMPLETE
-ListSink
-
-PREHOOK: query: describe formatted src_multi1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@src_multi1
-POSTHOOK: query: describe formatted src_multi1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@src_multi1
-# col_name data_type   comment 
-
-keystring  default 
-value  string  default 
-
-# Detailed Table Information
-Database:  default  
- A masked pattern was here 
-Retention: 0
- A masked pattern was here 
-Table Type:MANAGED_TABLE
-Table Parameters:   
-   COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
-   numFiles1   
-   numRows 500 
-   rawDataSize 5312
-   totalSize   5812
- A masked pattern was here 
-
-# Storage Information   
-SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe  
 
-InputFormat:   org.apache.hadoop.mapred.TextInputFormat 
-OutputFormat:  
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat   
-Compressed:No   
-Num Buckets:   -1   
-Bucket Columns:[]   
-Sort Columns:  []   
-Storage Desc Params:
-   serialization.format1   
-PREHOOK: query: drop table a
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table a
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table b
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table b
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table a like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@a
-POSTHOOK: query: create table a like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@a
-PREHOOK: query: create table b like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@b
-POSTHOOK: query: create table b like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@b
-PREHOOK: query: from src
-insert into table a select *
-insert into table b select *
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@a
-PREHOOK: Output: default@b
-POSTHOOK: query: from src
-insert into table a select *
-insert into table b select *
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTH

[22/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/constprog_semijoin.q.out
--
diff --git a/ql/src/test/results/clientpositive/constprog_semijoin.q.out 
b/ql/src/test/results/clientpositive/constprog_semijoin.q.out
deleted file mode 100644
index 47f1169..000
--- a/ql/src/test/results/clientpositive/constprog_semijoin.q.out
+++ /dev/null
@@ -1,812 +0,0 @@
-PREHOOK: query: create table table1 (id int, val string, val1 string, dimid 
int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: create table table1 (id int, val string, val1 string, dimid 
int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: insert into table1 (id, val, val1, dimid) values (1, 
't1val01', 'val101', 100), (2, 't1val02', 'val102', 200), (3, 't1val03', 
'val103', 103), (3, 't1val01', 'val104', 100), (2, 't1val05', 'val105', 200), 
(3, 't1val01', 'val106', 103), (1, 't1val07', 'val107', 200), (2, 't1val01', 
'val108', 200), (3, 't1val09', 'val109', 103), (4,'t1val01', 'val110', 200)
-PREHOOK: type: QUERY
-PREHOOK: Output: default@table1
-POSTHOOK: query: insert into table1 (id, val, val1, dimid) values (1, 
't1val01', 'val101', 100), (2, 't1val02', 'val102', 200), (3, 't1val03', 
'val103', 103), (3, 't1val01', 'val104', 100), (2, 't1val05', 'val105', 200), 
(3, 't1val01', 'val106', 103), (1, 't1val07', 'val107', 200), (2, 't1val01', 
'val108', 200), (3, 't1val09', 'val109', 103), (4,'t1val01', 'val110', 200)
-POSTHOOK: type: QUERY
-POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.dimid EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, 
type:string, comment:), ]
-POSTHOOK: Lineage: table1.id EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
-POSTHOOK: Lineage: table1.val SIMPLE 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
-POSTHOOK: Lineage: table1.val1 SIMPLE 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, 
type:string, comment:), ]
-PREHOOK: query: create table table2 (id int, val2 string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@table2
-POSTHOOK: query: create table table2 (id int, val2 string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table2
-PREHOOK: query: insert into table2 (id, val2) values (1, 't2val201'), (2, 
't2val202'), (3, 't2val203')
-PREHOOK: type: QUERY
-PREHOOK: Output: default@table2
-POSTHOOK: query: insert into table2 (id, val2) values (1, 't2val201'), (2, 
't2val202'), (3, 't2val203')
-POSTHOOK: type: QUERY
-POSTHOOK: Output: default@table2
-POSTHOOK: Lineage: table2.id EXPRESSION 
[(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
-POSTHOOK: Lineage: table2.val2 SIMPLE 
[(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
-PREHOOK: query: create table table3 (id int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@table3
-POSTHOOK: query: create table table3 (id int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table3
-PREHOOK: query: insert into table3 (id) values (100), (100), (101), (102), 
(103)
-PREHOOK: type: QUERY
-PREHOOK: Output: default@table3
-POSTHOOK: query: insert into table3 (id) values (100), (100), (101), (102), 
(103)
-POSTHOOK: type: QUERY
-POSTHOOK: Output: default@table3
-POSTHOOK: Lineage: table3.id EXPRESSION 
[(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
-PREHOOK: query: explain select table1.id, table1.val, table1.val1 from table1 
left semi join table3 on table1.dimid = table3.id where table1.val = 't1val01'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select table1.id, table1.val, table1.val1 from table1 
left semi join table3 on table1.dimid = table3.id where table1.val = 't1val01'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: table1
-Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: ((val = 't1val01') and dimid is not null) (type: 
boolean)
-  Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: id (type: int), val1 (type: string), dimid (type: 
int)
-outputColumnNames: _col0, _col2, _col3
- 

[41/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/bucket4.q.out
--
diff --git a/ql/src/test/results/clientpositive/bucket4.q.out 
b/ql/src/test/results/clientpositive/bucket4.q.out
deleted file mode 100644
index d06c7ed..000
--- a/ql/src/test/results/clientpositive/bucket4.q.out
+++ /dev/null
@@ -1,467 +0,0 @@
-PREHOOK: query: CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY 
(key) SORTED BY (key) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@bucket4_1
-POSTHOOK: query: CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY 
(key) SORTED BY (key) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucket4_1
-PREHOOK: query: explain extended
-insert overwrite table bucket4_1
-select * from src
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-insert overwrite table bucket4_1
-select * from src
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: src
-Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE 
Column stats: NONE
-GatherStats: false
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE 
Column stats: NONE
-  Reduce Output Operator
-key expressions: UDFToInteger(_col0) (type: int)
-null sort order: a
-sort order: +
-Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE 
Column stats: NONE
-tag: -1
-value expressions: _col0 (type: string), _col1 (type: string)
-auto parallelism: false
-  Path -> Alias:
- A masked pattern was here 
-  Path -> Partition:
- A masked pattern was here 
-  Partition
-base file name: src
-input format: org.apache.hadoop.mapred.TextInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-properties:
-  bucket_count -1
-  columns key,value
-  columns.comments 'default','default'
-  columns.types string:string
- A masked pattern was here 
-  name default.src
-  numFiles 1
-  numRows 0
-  rawDataSize 0
-  serialization.ddl struct src { string key, string value}
-  serialization.format 1
-  serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  totalSize 5812
- A masked pattern was here 
-serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  
-  input format: org.apache.hadoop.mapred.TextInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-  properties:
-bucket_count -1
-columns key,value
-columns.comments 'default','default'
-columns.types string:string
- A masked pattern was here 
-name default.src
-numFiles 1
-numRows 0
-rawDataSize 0
-serialization.ddl struct src { string key, string value}
-serialization.format 1
-serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-totalSize 5812
- A masked pattern was here 
-  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  name: default.src
-name: default.src
-  Truncated Path -> Alias:
-/src [src]
-  Needs Tagging: false
-  Reduce Operator Tree:
-Select Operator
-  expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 
(type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE 
Column stats: NONE
-  File Output Operator
-compressed: false
-GlobalTableId: 1
- A masked pattern was here 
-NumFilesPerFileSink: 2
-Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE 
Column stats: NONE
- A masked pattern was here 
-table:
-input format: org.apache.hadoop.mapred.TextInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-properties:
-

[16/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/count.q.out
--
diff --git a/ql/src/test/results/clientpositive/count.q.out 
b/ql/src/test/results/clientpositive/count.q.out
deleted file mode 100644
index 641da27..000
--- a/ql/src/test/results/clientpositive/count.q.out
+++ /dev/null
@@ -1,752 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
-create table abcd (a int, b int, c int, d int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@abcd
-POSTHOOK: query: -- SORT_QUERY_RESULTS
-create table abcd (a int, b int, c int, d int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@abcd
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE 
abcd
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@abcd
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE 
abcd
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@abcd
-PREHOOK: query: select * from abcd
-PREHOOK: type: QUERY
-PREHOOK: Input: default@abcd
- A masked pattern was here 
-POSTHOOK: query: select * from abcd
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@abcd
- A masked pattern was here 
-10 100 45  4
-10 100 NULL5
-10 100050  1
-100100 10  3
-12 100 75  7
-12 NULL80  2
-NULL   35  23  6
-PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) 
from abcd group by a
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select a, count(distinct b), count(distinct c), 
sum(d) from abcd group by a
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: abcd
-Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column 
stats: NONE
-Select Operator
-  expressions: a (type: int), b (type: int), c (type: int), d 
(type: int)
-  outputColumnNames: a, b, c, d
-  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE 
Column stats: NONE
-  Group By Operator
-aggregations: count(DISTINCT b), count(DISTINCT c), sum(d)
-keys: a (type: int), b (type: int), c (type: int)
-mode: hash
-outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE 
Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: int), _col1 (type: int), _col2 
(type: int)
-  sort order: +++
-  Map-reduce partition columns: _col0 (type: int)
-  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE 
Column stats: NONE
-  value expressions: _col5 (type: bigint)
-  Reduce Operator Tree:
-Group By Operator
-  aggregations: count(DISTINCT KEY._col1:0._col0), count(DISTINCT 
KEY._col1:1._col0), sum(VALUE._col2)
-  keys: KEY._col0 (type: int)
-  mode: mergepartial
-  outputColumnNames: _col0, _col1, _col2, _col3
-  Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column 
stats: NONE
-  File Output Operator
-compressed: false
-Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column 
stats: NONE
-table:
-input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-Fetch Operator
-  limit: -1
-  Processor Tree:
-ListSink
-
-PREHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from 
abcd group by a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@abcd
- A masked pattern was here 
-POSTHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from 
abcd group by a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@abcd
- A masked pattern was here 
-10 2   2   10
-1001   1   3
-12 1   2   9
-NULL   1   1   6
-PREHOOK: query: explain select count(1), count(*), count(a), count(b), 
count(c), count(d), count(distinct a), count(distinct b), count(distinct c), 
count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct 
c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), 
count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), 
count(distinct a,b,d), count(distinct a,b,c,d) from abcd
-PREHOOK: type: QUERY
-POSTHOOK: query: expl

[06/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/join_nulls.q.out
--
diff --git a/ql/src/test/results/clientpositive/join_nulls.q.out 
b/ql/src/test/results/clientpositive/join_nulls.q.out
deleted file mode 100644
index 2401447..000
--- a/ql/src/test/results/clientpositive/join_nulls.q.out
+++ /dev/null
@@ -1,652 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
-
-CREATE TABLE myinput1(key int, value int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@myinput1
-POSTHOOK: query: -- SORT_QUERY_RESULTS
-
-CREATE TABLE myinput1(key int, value int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@myinput1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE 
myinput1
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@myinput1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE 
myinput1
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@myinput1
-Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Stage-1:MAPRED' is a cross product
-PREHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@myinput1
- A masked pattern was here 
-100100 100 100
-100100 48  NULL
-100100 NULL35
-48 NULL100 100
-48 NULL48  NULL
-48 NULLNULL35
-NULL   35  100 100
-NULL   35  48  NULL
-NULL   35  NULL35
-Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Stage-1:MAPRED' is a cross product
-PREHOOK: query: SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@myinput1
- A masked pattern was here 
-100100 100 100
-100100 48  NULL
-100100 NULL35
-48 NULL100 100
-48 NULL48  NULL
-48 NULLNULL35
-NULL   35  100 100
-NULL   35  48  NULL
-NULL   35  NULL35
-Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Stage-1:MAPRED' is a cross product
-PREHOOK: query: SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@myinput1
- A masked pattern was here 
-100100 100 100
-100100 48  NULL
-100100 NULL35
-48 NULL100 100
-48 NULL48  NULL
-48 NULLNULL35
-NULL   35  100 100
-NULL   35  48  NULL
-NULL   35  NULL35
-PREHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b ON a.key = b.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b ON a.key = b.value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@myinput1
- A masked pattern was here 
-100    100 100 100
-PREHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b ON a.key = b.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b ON a.key = b.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@myinput1
- A masked pattern was here 
-100    100 100 100
-48 NULL    48  NULL
-PREHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b ON a.value = b.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b ON a.value = b.value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@myinput1
- A masked pattern was here 
-100    100 100 100
-NULL   35  NULL    35
-PREHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b ON a.value = b.value 
and a.key=b.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b ON a.value = b.value 
and a.key=b.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@myinput1
- A masked pattern was here 
-100    100 100 100
-PREHOOK: query: SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = 
b.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT * FROM 
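The join_nulls results above follow directly from SQL NULL semantics: a NULL never satisfies an equi-join predicate, so the (48, NULL) row survives joins keyed on the non-NULL column and drops out of joins keyed on the NULL one. A minimal HiveQL sketch against the same myinput1 table (illustrative, not part of the deleted file):

  -- (48, NULL) matches itself on key but has no partner on value
  SELECT * FROM myinput1 a JOIN myinput1 b ON a.key = b.key;     -- keeps the 48/NULL row
  SELECT * FROM myinput1 a JOIN myinput1 b ON a.value = b.value; -- drops the 48/NULL row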

[25/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/cbo_semijoin.q.out
--
diff --git a/ql/src/test/results/clientpositive/cbo_semijoin.q.out 
b/ql/src/test/results/clientpositive/cbo_semijoin.q.out
deleted file mode 100644
index bdd8125..000
--- a/ql/src/test/results/clientpositive/cbo_semijoin.q.out
+++ /dev/null
@@ -1,440 +0,0 @@
-PREHOOK: query: -- 12. SemiJoin
-select cbo_t1.c_int   from cbo_t1 left semi join   cbo_t2 on 
cbo_t1.key=cbo_t2.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t2
-PREHOOK: Input: default@cbo_t2@dt=2014
- A masked pattern was here 
-POSTHOOK: query: -- 12. SemiJoin
-select cbo_t1.c_int   from cbo_t1 left semi join   cbo_t2 on 
cbo_t1.key=cbo_t2.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
-POSTHOOK: Input: default@cbo_t2
-POSTHOOK: Input: default@cbo_t2@dt=2014
- A masked pattern was here 
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-PREHOOK: query: select cbo_t1.c_int   from cbo_t1 left semi join   
cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int 
> 0 or cbo_t1.c_float >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t2
-PREHOOK: Input: default@cbo_t2@dt=2014
- A masked pattern was here 
-POSTHOOK: query: select cbo_t1.c_int   from cbo_t1 left semi join   
cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int 
> 0 or cbo_t1.c_float >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
-POSTHOOK: Input: default@cbo_t2
-POSTHOOK: Input: default@cbo_t2@dt=2014
- A masked pattern was here 
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-PREHOOK: query: select * from (select c, b, a from (select key as a, c_int as 
b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 == 2) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select 
cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where 
(cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 
on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 
0 or c >= 0)) R where  (b + 1 = 2) and (R.b > 0 or c >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t2
-PREHOOK: Input: default@cbo_t2@dt=2014
-PREHOOK: Input: default@cbo_t3
- A masked pattern was here 
-POSTHOOK: query: select * from (select c, b, a from (select key as a, c_int as 
b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 == 2) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select 
cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where 
(cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 
on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 
0 or c >= 0)) R where  (b + 1 = 2) and (R.b > 0 or c >= 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
-POSTHOOK: Input: default@cbo_t2
-POSTHOOK: Input: default@cbo_t2@dt=2014
-POSTHOOK: Input: default@cbo_t3
- A masked pattern was here 
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-1.0    1   1
-PREHOOK: query: select * from (select cbo_t3.c_int, cbo_t1.c, b from (select 
key as a, c_int as b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 
= 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join 
(select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where 
(cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 
on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t3.c_int  
== 2) and (b > 0 or c_int >= 0)) R where  (R.c_int + 1 = 2) and (R.b > 0 or 
c_int >= 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t2
-PREHOOK: Input: default@cbo_t2@dt=2014
-PREHOOK: Input: default@cbo_t3
- A masked pattern was here 
-POSTHOOK: query: select * from (select cbo_t3.c_int, cbo_t1.c, b from (select 
key as a, c_int as b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 
= 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join 
(select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where 
(cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.

[19/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/correlationoptimizer3.q.out
--
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer3.q.out 
b/ql/src/test/results/clientpositive/correlationoptimizer3.q.out
deleted file mode 100644
index 06fb699..000
--- a/ql/src/test/results/clientpositive/correlationoptimizer3.q.out
+++ /dev/null
@@ -1,1422 +0,0 @@
-PREHOOK: query: -- When Correlation Optimizer is turned off, 5 MR jobs will be 
generated.
--- When Correlation Optimizer is turned on, the subquery tmp will be evaluated
--- in a single MR job (including the subquery b, the subquery d, and b join d).
--- At the reduce side of the MR job evaluating tmp, two operation paths
--- (for subquery b and d) have different depths. The path starting from 
subquery b
--- is JOIN->GBY->JOIN, which has a depth of 3. While, the path starting from 
subquery d
--- is JOIN->JOIN. We should be able to handle this case.
-EXPLAIN
-SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
-FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
-  FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = 
y.key) group by x.key) b
-  JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
-  ON b.key = d.key) tmp
-PREHOOK: type: QUERY
-POSTHOOK: query: -- When Correlation Optimizer is turned off, 5 MR jobs will 
be generated.
--- When Correlation Optimizer is turned on, the subquery tmp will be evaluated
--- in a single MR job (including the subquery b, the subquery d, and b join d).
--- At the reduce side of the MR job evaluating tmp, two operation paths
--- (for subquery b and d) have different depths. The path starting from 
subquery b
--- is JOIN->GBY->JOIN, which has a depth of 3. While, the path starting from 
subquery d
--- is JOIN->JOIN. We should be able to handle this case.
-EXPLAIN
-SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
-FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
-  FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = 
y.key) group by x.key) b
-  JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
-  ON b.key = d.key) tmp
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-6
-  Stage-3 depends on stages: Stage-2
-  Stage-5 is a root stage
-  Stage-6 depends on stages: Stage-5
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: y
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: key is not null (type: boolean)
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: string)
-outputColumnNames: _col0
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  TableScan
-alias: x
-Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: key is not null (type: boolean)
-  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: string), value (type: string)
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 25 Data size: 191 Basic stats: 
COMPLETE Column stats: NONE
-  value expressions: _col1 (type: string)
-  Reduce Operator Tree:
-Join Operator
-  condition map:
-   Inner Join 0 to 1
-  keys:
-0 _col0 (type: string)
-1 _col0 (type: string)
-  outputColumnNames: _col1, _col2
-  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: _col1 (type: string), _col2 (type: string)
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLET

[46/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/auto_join_filters.q.out
--
diff --git a/ql/src/test/results/clientpositive/auto_join_filters.q.out 
b/ql/src/test/results/clientpositive/auto_join_filters.q.out
deleted file mode 100644
index e526284..000
--- a/ql/src/test/results/clientpositive/auto_join_filters.q.out
+++ /dev/null
@@ -1,540 +0,0 @@
-PREHOOK: query: CREATE TABLE myinput1(key int, value int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@myinput1
-POSTHOOK: query: CREATE TABLE myinput1(key int, value int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@myinput1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE 
myinput1
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@myinput1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE 
myinput1
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@myinput1
-Warning: Map Join MAPJOIN[20][bigTable=?] in task 'Stage-2:MAPRED' is a cross 
product
-PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a 
JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 
40 AND b.value > 50 AND b.key = b.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 
a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key 
> 40 AND b.value > 50 AND b.key = b.value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@myinput1
- A masked pattern was here 
-3078400
-Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Stage-2:MAPRED' is a cross 
product
-PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a 
LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value 
AND b.key > 40 AND b.value > 50 AND b.key = b.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 
a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value 
AND b.key > 40 AND b.value > 50 AND b.key = b.value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@myinput1
- A masked pattern was here 
-4937935
-Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Stage-2:MAPRED' is a cross 
product
-PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a 
RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value 
AND b.key > 40 AND b.value > 50 AND b.key = b.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 
a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = 
a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@myinput1
- A masked pattern was here 
-3080335
-Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Stage-1:MAPRED' is a cross product
-PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a 
FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value 
AND b.key > 40 AND b.value > 50 AND b.key = b.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 
a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value 
AND b.key > 40 AND b.value > 50 AND b.key = b.value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@myinput1
- A masked pattern was here 
-4939870
-PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a 
JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = 
a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a 
JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = 
a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@myinput1
- A masked pattern was here 
-3078400
-PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a 
JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = 
a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)
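A note on the repeated cross-product warnings in auto_join_filters.q.out: the ON clauses above contain only range filters and same-table equalities, never an a.col = b.col term, so Hive has no join key and falls back to a cross product with the predicates applied afterwards. Illustrative sketch over the same table (not quoted from the deleted file):

  -- no equality between a and b, hence the "cross product" warning
  SELECT sum(hash(a.key, a.value, b.key, b.value))
  FROM myinput1 a JOIN myinput1 b
    ON a.key > 40 AND a.value > 50 AND a.key = a.value
   AND b.key > 40 AND b.value > 50 AND b.key = b.value;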

[01/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master d72121f0c -> 3e8ae68eb


http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/lineage2.q.out
--
diff --git a/ql/src/test/results/clientpositive/lineage2.q.out 
b/ql/src/test/results/clientpositive/lineage2.q.out
deleted file mode 100644
index 7224bce..000
--- a/ql/src/test/results/clientpositive/lineage2.q.out
+++ /dev/null
@@ -1,705 +0,0 @@
-PREHOOK: query: drop table if exists src2
-PREHOOK: type: DROPTABLE
-PREHOOK: query: create table src2 as select key key2, value value2 from src1
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src1
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src2
-{"version":"1.0","engine":"mr","database":"default","hash":"3a39d46286e4c2cd2139c9bb248f7b4f","queryText":"create
 table src2 as select key key2, value value2 from 
src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src2.value2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
-PREHOOK: query: select * from src1 where key is not null and value is not null 
limit 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src1
- A masked pattern was here 
-{"version":"1.0","engine":"mr","database":"default","hash":"b5b224847b2333e790a2c229434a04c8","queryText":"select
 * from src1 where key is not null and value is not null limit 
3","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"(src1.key
 is not null and src1.value is not 
null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
-238    val_238
-   
-311    val_311
-PREHOOK: query: select * from src1 where key > 10 and value > 'val' order by 
key limit 5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src1
- A masked pattern was here 
-{"version":"1.0","engine":"mr","database":"default","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select
 * from src1 where key > 10 and value > 'val' order by key limit 
5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key)
 > 10.0) and (src1.value > 
'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
-146    val_146
-150    val_150
-213    val_213
-238    val_238
-255    val_255
-PREHOOK: query: drop table if exists dest1
-PREHOOK: type: DROPTABLE
-PREHOOK: query: create table dest1 as select * from src1
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src1
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-{"version":"1.0","engine":"mr","database":"default","hash":"712fe958c357bcfc978b95c43eb19084","queryText":"create
 table dest1 as select * from 
src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
-PREHOOK: query: insert into table dest1 select * from src2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src2
-PREHOOK: Output: default@dest1
-{"version":"1.0","engine":"mr","database":"default","hash":"ecc718a966d8887b18084a55dd96f0bc","queryText":"insert
 into table dest1 select * from 
src2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
-PREHOOK: query: select key k, dest1.value from dest1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
- A masked pattern was here 
-{"version":"1.0","engine":"mr","database":"default","hash":"416b6f4cd63edd4f9d8213d2d7819d21","queryText":"select
 key k, dest1.value from 
dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PRO

[21/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/correlationoptimizer1.q.out
--
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer1.q.out 
b/ql/src/test/results/clientpositive/correlationoptimizer1.q.out
deleted file mode 100644
index 65b0ca5..000
--- a/ql/src/test/results/clientpositive/correlationoptimizer1.q.out
+++ /dev/null
@@ -1,3091 +0,0 @@
-PREHOOK: query: -- This query has a GroupByOperator following JoinOperator and 
they share the same keys.
--- When Correlation Optimizer is turned off, three MR jobs will be generated.
--- When Correlation Optimizer is turned on, two MR jobs will be generated
--- and JoinOperator (on the column of key) and GroupByOperator (also on the 
column
--- of key) will be executed in the first MR job.
-EXPLAIN
-SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
-FROM (SELECT x.key AS key, count(1) AS cnt
-  FROM src1 x JOIN src y ON (x.key = y.key)
-  GROUP BY x.key) tmp
-PREHOOK: type: QUERY
-POSTHOOK: query: -- This query has a GroupByOperator following JoinOperator and 
they share the same keys.
--- When Correlation Optimizer is turned off, three MR jobs will be generated.
--- When Correlation Optimizer is turned on, two MR jobs will be generated
--- and JoinOperator (on the column of key) and GroupByOperator (also on the 
column
--- of key) will be executed in the first MR job.
-EXPLAIN
-SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
-FROM (SELECT x.key AS key, count(1) AS cnt
-  FROM src1 x JOIN src y ON (x.key = y.key)
-  GROUP BY x.key) tmp
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: x
-Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: key is not null (type: boolean)
-  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: string)
-outputColumnNames: _col0
-Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE 
Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 25 Data size: 191 Basic stats: 
COMPLETE Column stats: NONE
-  TableScan
-alias: y
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: key is not null (type: boolean)
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: string)
-outputColumnNames: _col0
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Operator Tree:
-Join Operator
-  condition map:
-   Inner Join 0 to 1
-  keys:
-0 _col0 (type: string)
-1 _col0 (type: string)
-  outputColumnNames: _col0
-  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE 
Column stats: NONE
-  Group By Operator
-aggregations: count(1)
-keys: _col0 (type: string)
-mode: hash
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE 
Column stats: NONE
-File Output Operator
-  compressed: false
-  table:
-  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-Map Reduce
-  Map Operator Tree:
-  TableScan
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE 
Column stats: NONE
-  value expressions: _col1 (type: bigint)
-  Reduce Operator
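The comments in the deleted correlationoptimizer1.q.out describe the Correlation Optimizer collapsing a join and a group-by that share the same key into a single shuffle. A hedged sketch of how the two plans are usually compared (the property name is the standard switch, not quoted from the deleted file):

  set hive.optimize.correlation = false;  -- join and group-by each get their own MR job
  EXPLAIN
  SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
  FROM (SELECT x.key AS key, count(1) AS cnt
        FROM src1 x JOIN src y ON (x.key = y.key)
        GROUP BY x.key) tmp;
  set hive.optimize.correlation = true;   -- the shared key lets both run in the first MR job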

[30/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/cbo_limit.q.out
--
diff --git a/ql/src/test/results/clientpositive/cbo_limit.q.out 
b/ql/src/test/results/clientpositive/cbo_limit.q.out
deleted file mode 100644
index 13df214..000
--- a/ql/src/test/results/clientpositive/cbo_limit.q.out
+++ /dev/null
@@ -1,90 +0,0 @@
-PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit
-select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, 
cbo_t1.c_int, key order by x limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit
-select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, 
cbo_t1.c_int, key order by x limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-NULL   NULL    NULL
-PREHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as 
x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by 
y, x order by x,y limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) 
as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group 
by y, x order by x,y limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-NULL   NULL    1
-PREHOOK: query: select key from(select key from (select key from cbo_t1 limit 
5)cbo_t2  limit 5)cbo_t3  limit 5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: select key from(select key from (select key from cbo_t1 limit 
5)cbo_t2  limit 5)cbo_t3  limit 5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-1
-1
-1
-1
-1
-PREHOOK: query: select key, c_int from(select key, c_int from (select key, 
c_int from cbo_t1 order by c_int limit 5)cbo_t1  order by c_int limit 5)cbo_t2  
order by c_int limit 5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: select key, c_int from(select key, c_int from (select key, 
c_int from cbo_t1 order by c_int limit 5)cbo_t1  order by c_int limit 5)cbo_t2  
order by c_int limit 5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-NULL   NULL
-NULL   NULL
-1  1
-1  1
-1  1
-PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key 
order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r 
from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or 
cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r 
asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + 
cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by 
cbo_t3.c_int+c desc, c limit 5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t2
-PREHOOK: Input: default@cbo_t2@dt=2014
-PREHOOK: Input: default@cbo_t3
- A masked pattern was here 
-POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key 
order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r 
from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or 
cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r 
asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + 
cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by 
cbo_t3.c_int+c desc, c limit 5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
-POSTHOOK: Input: default@cbo_t2
-POSTHOOK: Input: default@cbo_t2@dt=2014
-POSTHOOK: Input: default@cbo_t3
- A masked pattern was here 
-1  12  6
-1  2   6
-PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key 
having cbo_t1.c_float > 0
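All of the cbo_*.q.out files removed by this commit exercise the Calcite-based cost-based optimizer. As an assumption about the corresponding .q files (the property below is the standard CBO switch, not quoted here), they run along the lines of:

  set hive.cbo.enable = true;  -- plan through Calcite
  select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1
  group by c_float, cbo_t1.c_int, key order by x limit 1;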

[47/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/auto_join30.q.out
--
diff --git a/ql/src/test/results/clientpositive/auto_join30.q.out 
b/ql/src/test/results/clientpositive/auto_join30.q.out
deleted file mode 100644
index 361a270..000
--- a/ql/src/test/results/clientpositive/auto_join30.q.out
+++ /dev/null
@@ -1,2183 +0,0 @@
-PREHOOK: query: explain
-FROM 
-(SELECT src.* FROM src sort by key) x
-JOIN
-(SELECT src.* FROM src sort by value) Y
-ON (x.key = Y.key)
-select sum(hash(Y.key,Y.value))
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-FROM 
-(SELECT src.* FROM src sort by key) x
-JOIN
-(SELECT src.* FROM src sort by value) Y
-ON (x.key = Y.key)
-select sum(hash(Y.key,Y.value))
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1, Stage-4 , consists of Stage-8, Stage-9, 
Stage-2
-  Stage-8 has a backup stage: Stage-2
-  Stage-5 depends on stages: Stage-8
-  Stage-3 depends on stages: Stage-2, Stage-5, Stage-6
-  Stage-9 has a backup stage: Stage-2
-  Stage-6 depends on stages: Stage-9
-  Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: src
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: key is not null (type: boolean)
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: string)
-outputColumnNames: _col0
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Operator Tree:
-Select Operator
-  expressions: KEY.reducesinkkey0 (type: string)
-  outputColumnNames: _col0
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  File Output Operator
-compressed: false
-table:
-input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-7
-Conditional Operator
-
-  Stage: Stage-8
-Map Reduce Local Work
-  Alias -> Map Local Tables:
-$INTNAME1 
-  Fetch Operator
-limit: -1
-  Alias -> Map Local Operator Tree:
-$INTNAME1 
-  TableScan
-HashTable Sink Operator
-  keys:
-0 _col0 (type: string)
-1 _col0 (type: string)
-
-  Stage: Stage-5
-Map Reduce
-  Map Operator Tree:
-  TableScan
-Map Join Operator
-  condition map:
-   Inner Join 0 to 1
-  keys:
-0 _col0 (type: string)
-1 _col0 (type: string)
-  outputColumnNames: _col2, _col3
-  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE 
Column stats: NONE
-  Group By Operator
-aggregations: sum(hash(_col2,_col3))
-mode: hash
-outputColumnNames: _col0
-Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
-File Output Operator
-  compressed: false
-  table:
-  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-  Local Work:
-Map Reduce Local Work
-
-  Stage: Stage-3
-Map Reduce
-  Map Operator Tree:
-  TableScan
-Reduce Output Operator
-  sort order: 
-  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
-  value expressions: _col0 (type: bigint)
-  Reduce Operator Tree:
-Group By Operator
-  aggregations: sum(VALUE._col0)
-  mode: mergepartial
-  outputColumnNames: _col0
-  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
-  File Output Operator
-compressed: false
-Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
-table:
-input format: org.apache.hadoop.mapred.Seque
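In the auto_join30.q.out plan above, the Conditional Operator and the 'has a backup stage' lines come from automatic map-join conversion: Hive keeps the original shuffle-join stage as a fallback in case the small side cannot be loaded into memory. A sketch of the switches involved (values are illustrative, not quoted from the .q file):

  set hive.auto.convert.join = true;                    -- try map joins automatically
  set hive.auto.convert.join.noconditionaltask = false; -- keep the conditional/backup stages
  EXPLAIN
  FROM (SELECT src.* FROM src SORT BY key) x
  JOIN (SELECT src.* FROM src SORT BY value) y ON (x.key = y.key)
  SELECT sum(hash(y.key, y.value));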

[15/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/disable_merge_for_bucketing.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/disable_merge_for_bucketing.q.out 
b/ql/src/test/results/clientpositive/disable_merge_for_bucketing.q.out
deleted file mode 100644
index 34a261c..000
--- a/ql/src/test/results/clientpositive/disable_merge_for_bucketing.q.out
+++ /dev/null
@@ -1,474 +0,0 @@
-PREHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY 
(key) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@bucket2_1
-POSTHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY 
(key) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucket2_1
-PREHOOK: query: explain extended
-insert overwrite table bucket2_1
-select * from src
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-insert overwrite table bucket2_1
-select * from src
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: src
-Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE 
Column stats: NONE
-GatherStats: false
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE 
Column stats: NONE
-  Reduce Output Operator
-null sort order: 
-sort order: 
-Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE 
Column stats: NONE
-tag: -1
-value expressions: _col0 (type: string), _col1 (type: string)
-auto parallelism: false
-  Path -> Alias:
- A masked pattern was here 
-  Path -> Partition:
- A masked pattern was here 
-  Partition
-base file name: src
-input format: org.apache.hadoop.mapred.TextInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-properties:
-  bucket_count -1
-  columns key,value
-  columns.comments 'default','default'
-  columns.types string:string
- A masked pattern was here 
-  name default.src
-  numFiles 1
-  numRows 0
-  rawDataSize 0
-  serialization.ddl struct src { string key, string value}
-  serialization.format 1
-  serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  totalSize 5812
- A masked pattern was here 
-serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  
-  input format: org.apache.hadoop.mapred.TextInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-  properties:
-bucket_count -1
-columns key,value
-columns.comments 'default','default'
-columns.types string:string
- A masked pattern was here 
-name default.src
-numFiles 1
-numRows 0
-rawDataSize 0
-serialization.ddl struct src { string key, string value}
-serialization.format 1
-serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-totalSize 5812
- A masked pattern was here 
-  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  name: default.src
-name: default.src
-  Truncated Path -> Alias:
-/src [src]
-  Needs Tagging: false
-  Reduce Operator Tree:
-Select Operator
-  expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 
(type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE 
Column stats: NONE
-  File Output Operator
-compressed: false
-GlobalTableId: 1
- A masked pattern was here 
-NumFilesPerFileSink: 2
-Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE 
Column stats: NONE
- A masked pattern was here 
-table:
-input format: org.apache.hadoop.mapred.TextInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-properties:
-  COLUMN_ST
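disable_merge_for_bucketing.q.out exercises the interaction between small-file merging and bucketed inserts: the two bucket files produced by the reducer (NumFilesPerFileSink: 2 above) must survive as separate files. The properties involved are presumably the standard merge switches; the values below are an assumption, not quoted from the .q file:

  set hive.merge.mapfiles = false;
  set hive.merge.mapredfiles = false;
  insert overwrite table bucket2_1 select * from src;  -- leaves one file per bucket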

[03/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/limit_pushdown.q.out
--
diff --git a/ql/src/test/results/clientpositive/limit_pushdown.q.out 
b/ql/src/test/results/clientpositive/limit_pushdown.q.out
deleted file mode 100644
index 898662b..000
--- a/ql/src/test/results/clientpositive/limit_pushdown.q.out
+++ /dev/null
@@ -1,1432 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
-
--- HIVE-3562 Some limit can be pushed down to map stage
-
-explain
-select key,value from src order by key limit 20
-PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_QUERY_RESULTS
-
--- HIVE-3562 Some limit can be pushed down to map stage
-
-explain
-select key,value from src order by key limit 20
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: src
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Reduce Output Operator
-key expressions: _col0 (type: string)
-sort order: +
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-TopN Hash Memory Usage: 0.3
-value expressions: _col1 (type: string)
-  Reduce Operator Tree:
-Select Operator
-  expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: 
string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Limit
-Number of rows: 20
-Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
-  table:
-  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-Fetch Operator
-  limit: 20
-  Processor Tree:
-ListSink
-
-PREHOOK: query: select key,value from src order by key limit 20
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
- A masked pattern was here 
-POSTHOOK: query: select key,value from src order by key limit 20
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
- A masked pattern was here 
-0  val_0
-0  val_0
-0  val_0
-10 val_10
-100    val_100
-100    val_100
-103    val_103
-103    val_103
-104    val_104
-104    val_104
-105    val_105
-11 val_11
-111    val_111
-113    val_113
-113    val_113
-114    val_114
-116    val_116
-118    val_118
-118    val_118
-119    val_119
-PREHOOK: query: explain
-select key,value from src order by key desc limit 20
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-select key,value from src order by key desc limit 20
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: src
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Reduce Output Operator
-key expressions: _col0 (type: string)
-sort order: -
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-TopN Hash Memory Usage: 0.3
-value expressions: _col1 (type: string)
-  Reduce Operator Tree:
-Select Operator
-  expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: 
string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Limit
-Number of rows: 20
-Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
-  table:
-  input format: 
o
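As the HIVE-3562 comment in limit_pushdown.q.out says, a LIMIT under an ORDER BY can be evaluated on the map side with a bounded TopN hash; the 'TopN Hash Memory Usage: 0.3' line in the plans above is the memory fraction reserved for it. Hedged sketch (the property name is the standard one, not quoted from the .q file):

  set hive.limit.pushdown.memory.usage = 0.3;  -- fraction of map memory for the TopN hash
  explain select key, value from src order by key limit 20;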

[29/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/cbo_rp_join.q.out
--
diff --git a/ql/src/test/results/clientpositive/cbo_rp_join.q.out 
b/ql/src/test/results/clientpositive/cbo_rp_join.q.out
deleted file mode 100644
index c5e9858..000
--- a/ql/src/test/results/clientpositive/cbo_rp_join.q.out
+++ /dev/null
@@ -1,15028 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- 4. Test Select + Join + TS
-select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on 
cbo_t1.key=cbo_t2.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t2
-PREHOOK: Input: default@cbo_t2@dt=2014
- A masked pattern was here 
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- 4. Test Select + Join + TS
-select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on 
cbo_t1.key=cbo_t2.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
-POSTHOOK: Input: default@cbo_t2
-POSTHOOK: Input: default@cbo_t2@dt=2014
- A masked pattern was here 
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-1  1
-PREHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t3
- A masked pattern was here 
-POSTHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
-POSTHOOK: Input: default@cbo_t3
- A masked pattern was here 
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
- 1 
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-1 
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-PREHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 where 
cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t3
- A masked pattern was here 
-POSTHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 where 
cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
-POSTHOOK: Input: default@cbo_t3
- A masked pattern was here 
- 1
- 1
- 1
- 1
- 1 
- 1 
- 1 
- 1 
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1
-1 
-1 
-1 
-1 
-PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join  
cbo_t2 on cbo_t1.key=cb
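cbo_rp_join.q.out is the 'return path' twin of cbo_join.q.out: the same queries, but the Calcite plan is translated back through Hive operators. Presumably (both property names below are assumptions, not quoted from this mail) it runs with:

  set hive.cbo.enable = true;
  set hive.cbo.returnpath.hiveop = true;  -- use the Hive-operator return path from Calcite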

[45/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/auto_smb_mapjoin_14.q.out
--
diff --git a/ql/src/test/results/clientpositive/auto_smb_mapjoin_14.q.out 
b/ql/src/test/results/clientpositive/auto_smb_mapjoin_14.q.out
deleted file mode 100644
index 577f5d7..000
--- a/ql/src/test/results/clientpositive/auto_smb_mapjoin_14.q.out
+++ /dev/null
@@ -1,1792 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
-
-CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) 
INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl1
-POSTHOOK: query: -- SORT_QUERY_RESULTS
-
-CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) 
INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl1
-PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl2
-POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl2
-PREHOOK: query: insert overwrite table tbl1
-select * from src where key < 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tbl1
-POSTHOOK: query: insert overwrite table tbl1
-select * from src where key < 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tbl1
-POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
-PREHOOK: query: insert overwrite table tbl2
-select * from src where key < 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tbl2
-POSTHOOK: query: insert overwrite table tbl2
-select * from src where key < 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tbl2
-POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
-PREHOOK: query: -- The join is being performed as part of sub-query. It should 
be converted to a sort-merge join
-explain
-select count(*) from (
-  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 
b on a.key = b.key
-) subq1
-PREHOOK: type: QUERY
-POSTHOOK: query: -- The join is being performed as part of sub-query. It 
should be converted to a sort-merge join
-explain
-select count(*) from (
-  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 
b on a.key = b.key
-) subq1
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: a
-Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: key is not null (type: boolean)
-  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: int)
-outputColumnNames: _col0
-Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE 
Column stats: NONE
-Sorted Merge Bucket Map Join Operator
-  condition map:
-   Inner Join 0 to 1
-  keys:
-0 _col0 (type: int)
-1 _col0 (type: int)
-  Group By Operator
-aggregations: count()
-mode: hash
-outputColumnNames: _col0
-Reduce Output Operator
-  sort order: 
-  value expressions: _col0 (type: bigint)
-  Reduce Operator Tree:
-Group By Operator
-  aggregations: count(VALUE._col0)
-  mode: mergepartial
-  outputColumnNames: _col0
-  File Output Operator
-compressed: false
-table:
-input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-Fetch Operator
-  limit: -1
-  Processor Tree:
-ListSink
-
-PREHOOK: query: select count(*) from (
-  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 
b on a.key = b.key
-) subq1
-PREHOOK: type: QUERY
-PREHOOK: Input:
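The 'Sorted Merge Bucket Map Join Operator' in the auto_smb_mapjoin_14 plan appears because both tbl1 and tbl2 are bucketed and sorted on the join key, so the buckets can be streamed and merged without a shuffle. The conversion is driven by switches along these lines (illustrative, not quoted from the .q file):

  set hive.auto.convert.sortmerge.join = true;
  set hive.optimize.bucketmapjoin = true;
  set hive.optimize.bucketmapjoin.sortedmerge = true;
  explain
  select count(*) from (
    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
  ) subq1;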

[33/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/bucketsortoptimize_insert_6.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_6.q.out 
b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_6.q.out
deleted file mode 100644
index fe32f45..000
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_6.q.out
+++ /dev/null
@@ -1,1323 +0,0 @@
-PREHOOK: query: -- Create two bucketed and sorted tables
-CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds 
STRING)
-CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table1
-POSTHOOK: query: -- Create two bucketed and sorted tables
-CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds 
STRING)
-CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table1
-PREHOOK: query: CREATE TABLE test_table2 (key INT, key2 INT, value STRING) 
PARTITIONED BY (ds STRING)
-CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, key2 INT, value STRING) 
PARTITIONED BY (ds STRING)
-CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
-PREHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) 
PARTITIONED BY (ds STRING)
-CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table3
-POSTHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) 
PARTITIONED BY (ds STRING)
-CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table3
-PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT key, key+1, 
value where key < 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table1@ds=1
-POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT key, key+1, 
value where key < 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table1@ds=1
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key2 EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT key, key+1, 
value where key < 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table2@ds=1
-POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT key, key+1, 
value where key < 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table2@ds=1
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key2 EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- Insert data into the bucketed table by selecting from 
another bucketed table
--- This should be a map-only operation, since the sort-order matches
-EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
-SELECT a.key, a.key2, concat(a.value, b.value) 
-FROM test_table1 a JOIN test_table2 b 
-ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- Insert data into the bucketed table by selecting from 
another bucketed table
--- This should be a map-only operation, since the sort-order matches
-EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
-SELECT a.key, a.key2, concat(a.value, b.value) 
-FROM test_table1 a JOIN test_table2 b 
-ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: a
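The chunk above is cut off before the rest of the plan, but the test comment states what it verifies: when both join inputs are bucketed and sorted on the same keys (here key, key2 into 2 buckets), the insert-select join can stay map-only. A minimal HiveQL sketch of that pattern, reusing the tables and query from this chunk; the set commands are the switches usually involved and are an assumption here, since the truncated diff does not show the test's settings:

  -- assumed switches; not visible in the quoted q.out
  set hive.optimize.bucketmapjoin=true;
  set hive.optimize.bucketmapjoin.sortedmerge=true;
  set hive.auto.convert.sortmerge.join=true;

  -- both sides are bucketed and sorted identically, so the join can run map-side
  EXPLAIN
  INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
  SELECT a.key, a.key2, concat(a.value, b.value)
  FROM test_table1 a JOIN test_table2 b
    ON a.key = b.key AND a.key2 = b.key2
  WHERE a.ds = '1' AND b.ds = '1';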

[40/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/bucket_groupby.q.out
--
diff --git a/ql/src/test/results/clientpositive/bucket_groupby.q.out 
b/ql/src/test/results/clientpositive/bucket_groupby.q.out
deleted file mode 100644
index f808bba..000
--- a/ql/src/test/results/clientpositive/bucket_groupby.q.out
+++ /dev/null
@@ -1,1635 +0,0 @@
-PREHOOK: query: create table clustergroupby(key string, value string) 
partitioned by(ds string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@clustergroupby
-POSTHOOK: query: create table clustergroupby(key string, value string) 
partitioned by(ds string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@clustergroupby
-PREHOOK: query: describe extended clustergroupby
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@clustergroupby
-POSTHOOK: query: describe extended clustergroupby
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@clustergroupby
-key    string
-value  string
-ds     string
-
-# Partition Information 
-# col_name data_type   comment 
-
-ds string  
-
- A masked pattern was here 
-PREHOOK: query: alter table clustergroupby clustered by (key) into 1 buckets
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@clustergroupby
-PREHOOK: Output: default@clustergroupby
-POSTHOOK: query: alter table clustergroupby clustered by (key) into 1 buckets
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@clustergroupby
-POSTHOOK: Output: default@clustergroupby
-PREHOOK: query: insert overwrite table clustergroupby partition (ds='100') 
select key, value from src sort by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@clustergroupby@ds=100
-POSTHOOK: query: insert overwrite table clustergroupby partition (ds='100') 
select key, value from src sort by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@clustergroupby@ds=100
-POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE 
[(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: explain
-select key, count(1) from clustergroupby where ds='100' group by key order by 
key limit 10
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-select key, count(1) from clustergroupby where ds='100' group by key order by 
key limit 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: clustergroupby
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Select Operator
-  expressions: key (type: string)
-  outputColumnNames: _col0
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Group By Operator
-aggregations: count(1)
-keys: _col0 (type: string)
-mode: hash
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  TopN Hash Memory Usage: 0.1
-  value expressions: _col1 (type: bigint)
-  Reduce Operator Tree:
-Group By Operator
-  aggregations: count(VALUE._col0)
-  keys: KEY._col0 (type: string)
-  mode: mergepartial
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
-  File Output Operator
-compressed: false
-table:
-input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-Map Reduce
-  Map Operator Tree:
-  TableScan
-Reduce Output Operator
-   
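The bucket_groupby plan above carries one detail worth calling out: the Reduce Output Operator reports "TopN Hash Memory Usage: 0.1", meaning the GROUP BY ... ORDER BY ... LIMIT 10 query gets its limit pushed into the shuffle, so the map side keeps a small in-memory top-n hash and drops keys that cannot reach the final ten rows. A small sketch of the pattern; the config name below is the usual knob behind that plan line and is an assumption, since the quoted chunk does not show it being set:

  -- assumed knob: fraction of heap the top-n hash in the reduce sink may use
  set hive.limit.pushdown.memory.usage=0.1;

  SELECT key, count(1)
  FROM clustergroupby
  WHERE ds = '100'
  GROUP BY key
  ORDER BY key
  LIMIT 10;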

[54/54] hive git commit: HIVE-17556: The test udf_mask_hash.q is failing (Marta Kuczora via Zoltan Haindrich)

2017-09-21 Thread kgyrtkirk
HIVE-17556: The test udf_mask_hash.q is failing (Marta Kuczora via Zoltan 
Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3e8ae68e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3e8ae68e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3e8ae68e

Branch: refs/heads/master
Commit: 3e8ae68ebf6037c2375a5d536eea5081fb33b8c7
Parents: cb77007
Author: Marta Kuczora 
Authored: Thu Sep 21 10:25:34 2017 +0200
Committer: Zoltan Haindrich 
Committed: Thu Sep 21 10:25:34 2017 +0200

--
 ql/src/test/results/clientpositive/udf_mask_hash.q.out | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3e8ae68e/ql/src/test/results/clientpositive/udf_mask_hash.q.out
--
diff --git a/ql/src/test/results/clientpositive/udf_mask_hash.q.out 
b/ql/src/test/results/clientpositive/udf_mask_hash.q.out
index 200f817..79939fa 100644
--- a/ql/src/test/results/clientpositive/udf_mask_hash.q.out
+++ b/ql/src/test/results/clientpositive/udf_mask_hash.q.out
@@ -31,9 +31,9 @@ STAGE PLANS:
   Row Limit Per Split: 1
   Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column 
stats: COMPLETE
   Select Operator
-expressions: 'dd78d68ad1b23bde126812482dd70ac6' (type: string)
+expressions: 
'8b44d559dc5d60e4453c9b4edf2a455fbce054bb8504cd3eb9b5f391bd239c90' (type: 
string)
 outputColumnNames: _col0
-Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE 
Column stats: COMPLETE
+Statistics: Num rows: 1 Data size: 148 Basic stats: COMPLETE 
Column stats: COMPLETE
 ListSink
 
 PREHOOK: query: select mask_hash('TestString-123'),
@@ -58,4 +58,4 @@ POSTHOOK: query: select mask_hash('TestString-123'),
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
  A masked pattern was here 
-dd78d68ad1b23bde126812482dd70ac6    dd78d68ad1b23bde126812482dd70ac6    835735ba20f1297683efca69fabd0fba    NULL    NULL    NULL    NULL    NULL
+8b44d559dc5d60e4453c9b4edf2a455fbce054bb8504cd3eb9b5f391bd239c90    8b44d559dc5d60e4453c9b4edf2a455fbce054bb8504cd3eb9b5f391bd239c90    30a88603135d3a6f7a66b4f9193da1ab4423aed45fb8fe736c2f2a08977f2bdd    NULL    NULL    NULL    NULL    NULL
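The change itself is only a refresh of expected values, but the shape of the diff tells the story: the old golden values are 32-character (MD5-length) digests and the new ones are 64-character (SHA-256-length), so the test output was regenerated after mask_hash switched to a SHA-256 digest. A quick sanity check from a Hive shell, assuming mask_hash now delegates to SHA-256 for string inputs (an inference from the digest length, not something this diff states):

  -- the two columns should agree if mask_hash hashes strings with SHA-256
  SELECT mask_hash('TestString-123'),
         sha2('TestString-123', 256);

The trailing NULLs in the result line are unchanged: non-string arguments still hash to NULL.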



[32/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/cbo_gby.q.out
--
diff --git a/ql/src/test/results/clientpositive/cbo_gby.q.out 
b/ql/src/test/results/clientpositive/cbo_gby.q.out
deleted file mode 100644
index 04597a7..000
--- a/ql/src/test/results/clientpositive/cbo_gby.q.out
+++ /dev/null
@@ -1,124 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
-
--- 6. Test Select + TS + Join + Fil + GB + GB Having
-select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, 
cbo_t1.c_int, key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: -- SORT_QUERY_RESULTS
-
--- 6. Test Select + TS + Join + Fil + GB + GB Having
-select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, 
cbo_t1.c_int, key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
- 1 4   2
- 1 4   2
-1  4   12
-1  4   2
-NULL    NULL    NULL
-PREHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as 
x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by 
y, x
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) 
as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group 
by y, x
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-5.0    12    1
-5.0    2    3
-NULL    NULL    1
-PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key 
order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from 
cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 
0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on 
cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or 
c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t2
-PREHOOK: Input: default@cbo_t2@dt=2014
-PREHOOK: Input: default@cbo_t3
- A masked pattern was here 
-POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key 
order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from 
cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 
0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on 
cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or 
c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
-POSTHOOK: Input: default@cbo_t2
-POSTHOOK: Input: default@cbo_t2@dt=2014
-POSTHOOK: Input: default@cbo_t3
- A masked pattern was here 
-1  12  6
-1  2   6
-PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key 
having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, 
c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key  
having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + 
cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c  having 
cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0  order by 
cbo_t3.c_int % c asc, cbo_t3.c_int desc
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t2
-PREHOOK: Input: default@cbo_t2@dt=2014
-PREHOOK: Input: default@cbo_t3
- A masked pattern was here 
-POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key 
having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>
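cbo_gby.q is one of several cbo_*.q golden files removed here; they exercise group-by and join shapes under the Calcite-based cost-based optimizer. For anyone re-running these queries by hand, the relevant switch is hive.cbo.enable, e.g. using the first query of the chunk:

  -- compare the CBO and legacy planner on the same group-by
  set hive.cbo.enable=true;
  EXPLAIN SELECT key, (c_int+1)+2 AS x, sum(c_int)
  FROM cbo_t1 GROUP BY c_float, cbo_t1.c_int, key;

  set hive.cbo.enable=false;
  EXPLAIN SELECT key, (c_int+1)+2 AS x, sum(c_int)
  FROM cbo_t1 GROUP BY c_float, cbo_t1.c_int, key;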

[49/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_1.q.out 
b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
deleted file mode 100644
index 4cf6df1..000
--- a/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
+++ /dev/null
@@ -1,1379 +0,0 @@
-PREHOOK: query: drop table src_multi1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table src_multi1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table src_multi1 like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_multi1
-POSTHOOK: query: create table src_multi1 like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_multi1
-PREHOOK: query: insert overwrite table src_multi1 select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@src_multi1
-POSTHOOK: query: insert overwrite table src_multi1 select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_multi1
-POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
-PREHOOK: query: explain extended select * from src_multi1
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select * from src_multi1
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-Fetch Operator
-  limit: -1
-  Processor Tree:
-TableScan
-  alias: src_multi1
-  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE 
Column stats: COMPLETE
-  GatherStats: false
-  Select Operator
-expressions: key (type: string), value (type: string)
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE 
Column stats: COMPLETE
-ListSink
-
-PREHOOK: query: describe formatted src_multi1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@src_multi1
-POSTHOOK: query: describe formatted src_multi1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@src_multi1
-# col_name data_type   comment 
-
-key    string    default
-value  string    default
-
-# Detailed Table Information
-Database:  default  
- A masked pattern was here 
-Retention: 0
- A masked pattern was here 
-Table Type:MANAGED_TABLE
-Table Parameters:   
-   COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
-   numFiles1   
-   numRows 500 
-   rawDataSize 5312
-   totalSize   5812
- A masked pattern was here 
-
-# Storage Information   
-SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe  
 
-InputFormat:   org.apache.hadoop.mapred.TextInputFormat 
-OutputFormat:  
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat   
-Compressed:No   
-Num Buckets:   -1   
-Bucket Columns:[]   
-Sort Columns:  []   
-Storage Desc Params:
-   serialization.format1   
-PREHOOK: query: drop table a
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table a
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table b
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table b
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table a like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@a
-POSTHOOK: query: create table a like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@a
-PREHOOK: query: create table b like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@b
-POSTHOOK: query: create table b like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@b
-PREHOOK: query: from src
-insert overwrite table a select *
-insert overwrite table b select *
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@a
-PREHOOK: Output: default@b
-POSTHOOK: query: from src
-insert overwrite table a select *
-insert overwrite table b select *
-POSTHOOK: type: QUERY
-POSTH
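The point of autoColumnStats_1.q is visible in the describe formatted output above: right after a plain INSERT, COLUMN_STATS_ACCURATE already lists both key and value, meaning column statistics were collected as a side effect of the write rather than by a separate ANALYZE TABLE. A minimal sketch; the two settings are the usual autogather switches and are assumed here, since the quoted chunk starts after the test's configuration:

  -- assumed settings for automatic basic and column statistics on write
  set hive.stats.autogather=true;
  set hive.stats.column.autogather=true;

  CREATE TABLE src_multi1 LIKE src;
  INSERT OVERWRITE TABLE src_multi1 SELECT * FROM src;

  -- COLUMN_STATS_ACCURATE should now include both columns
  DESCRIBE FORMATTED src_multi1;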

[28/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/cbo_rp_limit.q.out
--
diff --git a/ql/src/test/results/clientpositive/cbo_rp_limit.q.out 
b/ql/src/test/results/clientpositive/cbo_rp_limit.q.out
deleted file mode 100644
index 13df214..000
--- a/ql/src/test/results/clientpositive/cbo_rp_limit.q.out
+++ /dev/null
@@ -1,90 +0,0 @@
-PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit
-select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, 
cbo_t1.c_int, key order by x limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit
-select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, 
cbo_t1.c_int, key order by x limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-NULL    NULL    NULL
-PREHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as 
x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by 
y, x order by x,y limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) 
as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group 
by y, x order by x,y limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-NULL    NULL    1
-PREHOOK: query: select key from(select key from (select key from cbo_t1 limit 
5)cbo_t2  limit 5)cbo_t3  limit 5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: select key from(select key from (select key from cbo_t1 limit 
5)cbo_t2  limit 5)cbo_t3  limit 5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-1
-1
-1
-1
-1
-PREHOOK: query: select key, c_int from(select key, c_int from (select key, 
c_int from cbo_t1 order by c_int limit 5)cbo_t1  order by c_int limit 5)cbo_t2  
order by c_int limit 5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: select key, c_int from(select key, c_int from (select key, 
c_int from cbo_t1 order by c_int limit 5)cbo_t1  order by c_int limit 5)cbo_t2  
order by c_int limit 5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-NULL   NULL
-NULL   NULL
-1  1
-1  1
-1  1
-PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key 
order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r 
from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or 
cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r 
asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + 
cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by 
cbo_t3.c_int+c desc, c limit 5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
-PREHOOK: Input: default@cbo_t2
-PREHOOK: Input: default@cbo_t2@dt=2014
-PREHOOK: Input: default@cbo_t3
- A masked pattern was here 
-POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key 
order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r 
from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or 
cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r 
asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + 
cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by 
cbo_t3.c_int+c desc, c limit 5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
-POSTHOOK: Input: default@cbo_t2
-POSTHOOK: Input: default@cbo_t2@dt=2014
-POSTHOOK: Input: default@cbo_t3
- A masked pattern was here 
-1  12  6
-1  2   6
-PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key 
having cbo_t1

[35/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/bucketpruning1.q.out
--
diff --git a/ql/src/test/results/clientpositive/bucketpruning1.q.out 
b/ql/src/test/results/clientpositive/bucketpruning1.q.out
deleted file mode 100644
index 22477d4..000
--- a/ql/src/test/results/clientpositive/bucketpruning1.q.out
+++ /dev/null
@@ -1,2282 +0,0 @@
-PREHOOK: query: CREATE TABLE srcbucket_pruned(key int, value string) 
partitioned by (ds string) CLUSTERED BY (key) INTO 16 BUCKETS STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_pruned
-POSTHOOK: query: CREATE TABLE srcbucket_pruned(key int, value string) 
partitioned by (ds string) CLUSTERED BY (key) INTO 16 BUCKETS STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_pruned
-PREHOOK: query: -- cannot prune 2-key scenarios without a smarter optimizer
-CREATE TABLE srcbucket_unpruned(key int, value string) partitioned by (ds 
string) CLUSTERED BY (key,value) INTO 16 BUCKETS STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_unpruned
-POSTHOOK: query: -- cannot prune 2-key scenarios without a smarter optimizer
-CREATE TABLE srcbucket_unpruned(key int, value string) partitioned by (ds 
string) CLUSTERED BY (key,value) INTO 16 BUCKETS STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_unpruned
-PREHOOK: query: -- good cases
-
-explain extended
-select * from srcbucket_pruned where key = 1
-PREHOOK: type: QUERY
-POSTHOOK: query: -- good cases
-
-explain extended
-select * from srcbucket_pruned where key = 1
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-srcbucket_pruned
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_ALLCOLREF
-  TOK_WHERE
- =
-TOK_TABLE_OR_COL
-   key
-1
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: srcbucket_pruned
-filterExpr: (key = 1) (type: boolean)
-buckets included: [1,] of 16
-Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column 
stats: NONE
-GatherStats: false
-Filter Operator
-  isSamplingPred: false
-  predicate: (key = 1) (type: boolean)
-  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column 
stats: NONE
-  Select Operator
-expressions: 1 (type: int), value (type: string), ds (type: 
string)
-outputColumnNames: _col0, _col1, _col2
-Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL 
Column stats: NONE
-File Output Operator
-  compressed: false
-  GlobalTableId: 0
- A masked pattern was here 
-  NumFilesPerFileSink: 1
-  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL 
Column stats: NONE
- A masked pattern was here 
-  table:
-  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-  properties:
-columns _col0,_col1,_col2
-columns.types int:string:string
-escape.delim \
-hive.serialization.extend.additional.nesting.levels 
true
-serialization.escape.crlf true
-serialization.format 1
-serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  TotalFiles: 1
-  GatherStats: false
-  MultiFileSpray: false
-
-  Stage: Stage-0
-Fetch Operator
-  limit: -1
-  Processor Tree:
-ListSink
-
-PREHOOK: query: explain extended
-select * from srcbucket_pruned where key = 16
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from srcbucket_pruned where key = 16
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-srcbucket_pruned
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_ALLCOLREF
-  TOK_WHERE
- =
-TOK_TABLE_OR_COL
-
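The line to notice in the bucketpruning1.q plan above is "buckets included: [1,] of 16": because srcbucket_pruned is clustered on a single key into 16 buckets, an equality filter on that key lets the scan read one bucket file instead of all sixteen, while the two-key table (srcbucket_unpruned) cannot be pruned, exactly as the test comment says. A sketch of the pattern; hive.tez.bucket.pruning is named here as the usual switch for this behaviour and is an assumption, since the quoted chunk does not show the settings:

  -- assumed switch enabling bucket pruning for filters on the clustering key
  set hive.tez.bucket.pruning=true;

  -- single-key bucketing: key = 1 maps to exactly one of the 16 buckets
  EXPLAIN EXTENDED SELECT * FROM srcbucket_pruned WHERE key = 1;

  -- two-key bucketing: a filter on key alone cannot identify a bucket
  EXPLAIN EXTENDED SELECT * FROM srcbucket_unpruned WHERE key = 1;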

[43/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out
--
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out 
b/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out
deleted file mode 100644
index 6db35a6..000
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out
+++ /dev/null
@@ -1,1459 +0,0 @@
-PREHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket
-CREATE TABLE bucket_small (key string, value string) partitioned by (ds 
string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket
-CREATE TABLE bucket_small (key string, value string) partitioned by (ds 
string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucket_small
-PREHOOK: query: load data local inpath 
'../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small 
partition(ds='2008-04-08')
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: load data local inpath 
'../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small 
partition(ds='2008-04-08')
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@bucket_small
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: load data local inpath 
'../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small 
partition(ds='2008-04-08')
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@bucket_small@ds=2008-04-08
-POSTHOOK: query: load data local inpath 
'../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small 
partition(ds='2008-04-08')
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: load data local inpath 
'../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small 
partition(ds='2008-04-09')
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: load data local inpath 
'../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small 
partition(ds='2008-04-09')
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@bucket_small
-POSTHOOK: Output: default@bucket_small@ds=2008-04-09
-PREHOOK: query: load data local inpath 
'../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small 
partition(ds='2008-04-09')
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@bucket_small@ds=2008-04-09
-POSTHOOK: query: load data local inpath 
'../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small 
partition(ds='2008-04-09')
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@bucket_small@ds=2008-04-09
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned 
by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS 
TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) 
partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS 
STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucket_big
-PREHOOK: query: load data local inpath 
'../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big 
partition(ds='2008-04-08')
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@bucket_big
-POSTHOOK: query: load data local inpath 
'../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big 
partition(ds='2008-04-08')
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@bucket_big
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath 
'../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big 
partition(ds='2008-04-08')
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@bucket_big@ds=2008-04-08
-POSTHOOK: query: load data local inpath 
'../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big 
partition(ds='2008-04-08')
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath 
'../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big 
partition(ds='2008-04-08')
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@bucket_big@ds=2008-04-08
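auto_sortmerge_join_8.q sets up a 2-bucket, sorted "small" table and a 4-bucket, sorted "big" table across two partitions and, judging by its name, checks that joins between them are converted to sort-merge bucket joins automatically. The conversion switches are not visible in the truncated chunk, so the sketch below names the usual ones as an assumption, together with a hypothetical join over the tables the chunk creates:

  -- assumed switches for automatic sort-merge bucket join conversion
  set hive.auto.convert.sortmerge.join=true;
  set hive.optimize.bucketmapjoin=true;
  set hive.optimize.bucketmapjoin.sortedmerge=true;

  -- hypothetical probe query: small 2-bucket side against big 4-bucket side
  EXPLAIN
  SELECT count(*)
  FROM bucket_small a JOIN bucket_big b ON a.key = b.key;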

[13/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
--
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out 
b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
deleted file mode 100644
index d24ee16..000
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
+++ /dev/null
@@ -1,3041 +0,0 @@
-PREHOOK: query: create table over1k(
-   t tinyint,
-   si smallint,
-   i int,
-   b bigint,
-   f float,
-   d double,
-   bo boolean,
-   s string,
-   ts timestamp,
-   dec decimal(4,2),
-   bin binary)
-   row format delimited
-   fields terminated by '|'
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: create table over1k(
-   t tinyint,
-   si smallint,
-   i int,
-   b bigint,
-   f float,
-   d double,
-   bo boolean,
-   s string,
-   ts timestamp,
-   dec decimal(4,2),
-   bin binary)
-   row format delimited
-   fields terminated by '|'
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: load data local inpath '../../data/files/over1k' into table 
over1k
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@over1k
-POSTHOOK: query: load data local inpath '../../data/files/over1k' into table 
over1k
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@over1k
-PREHOOK: query: create table over1k_part(
-   si smallint,
-   i int,
-   b bigint,
-   f float)
-   partitioned by (ds string, t tinyint)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k_part
-POSTHOOK: query: create table over1k_part(
-   si smallint,
-   i int,
-   b bigint,
-   f float)
-   partitioned by (ds string, t tinyint)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k_part
-PREHOOK: query: create table over1k_part_limit like over1k_part
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k_part_limit
-POSTHOOK: query: create table over1k_part_limit like over1k_part
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k_part_limit
-PREHOOK: query: create table over1k_part_buck(
-   si smallint,
-   i int,
-   b bigint,
-   f float)
-   partitioned by (t tinyint)
-   clustered by (si) into 4 buckets
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k_part_buck
-POSTHOOK: query: create table over1k_part_buck(
-   si smallint,
-   i int,
-   b bigint,
-   f float)
-   partitioned by (t tinyint)
-   clustered by (si) into 4 buckets
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k_part_buck
-PREHOOK: query: create table over1k_part_buck_sort(
-   si smallint,
-   i int,
-   b bigint,
-   f float)
-   partitioned by (t tinyint)
-   clustered by (si) 
-   sorted by (f) into 4 buckets
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k_part_buck_sort
-POSTHOOK: query: create table over1k_part_buck_sort(
-   si smallint,
-   i int,
-   b bigint,
-   f float)
-   partitioned by (t tinyint)
-   clustered by (si) 
-   sorted by (f) into 4 buckets
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k_part_buck_sort
-PREHOOK: query: -- map-only jobs converted to map-reduce job by 
hive.optimize.sort.dynamic.partition optimization
-explain insert overwrite table over1k_part partition(ds="foo", t) select 
si,i,b,f,t from over1k where t is null or t=27
-PREHOOK: type: QUERY
-POSTHOOK: query: -- map-only jobs converted to map-reduce job by 
hive.optimize.sort.dynamic.partition optimization
-explain insert overwrite table over1k_part partition(ds="foo", t) select 
si,i,b,f,t from over1k where t is null or t=27
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: over1k
-Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: (t is null or (t = 27)) (type: boolean)
-  Stat
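The comment quoted in the chunk above names the mechanism directly: with hive.optimize.sort.dynamic.partition on, an INSERT into a dynamically partitioned table that would otherwise be map-only is given a reduce phase that clusters rows by the dynamic partition columns, so each writer receives its partitions in order and only a few partition files are open at a time. Reusing the query from the chunk:

  set hive.optimize.sort.dynamic.partition=true;

  EXPLAIN
  INSERT OVERWRITE TABLE over1k_part PARTITION (ds = "foo", t)
  SELECT si, i, b, f, t FROM over1k WHERE t IS NULL OR t = 27;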

[20/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/correlationoptimizer2.q.out
--
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer2.q.out 
b/ql/src/test/results/clientpositive/correlationoptimizer2.q.out
deleted file mode 100644
index d1f4e05..000
--- a/ql/src/test/results/clientpositive/correlationoptimizer2.q.out
+++ /dev/null
@@ -1,2257 +0,0 @@
-PREHOOK: query: -- In this query, subquery a and b both have a GroupByOperator 
and the a and b will be
--- joined. The key of JoinOperator is the same with both keys of 
GroupByOperators in subquery
--- a and b. When Correlation Optimizer is turned off, we have four MR jobs.
--- When Correlation Optimizer is turned on, 2 MR jobs will be generated.
--- The first job will evaluate subquery tmp (including subquery a, b, and the 
JoinOperator on a
--- and b).
-EXPLAIN
-SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2))
-FROM (SELECT a.key AS key1, a.cnt AS cnt1, b.key AS key2, b.cnt AS cnt2
-  FROM (SELECT x.key as key, count(x.value) AS cnt FROM src x group by 
x.key) a
-  JOIN (SELECT y.key as key, count(y.value) AS cnt FROM src1 y group by 
y.key) b
-  ON (a.key = b.key)) tmp
-PREHOOK: type: QUERY
-POSTHOOK: query: -- In this query, subquery a and b both have a 
GroupByOperator and the a and b will be
--- joined. The key of JoinOperator is the same with both keys of 
GroupByOperators in subquery
--- a and b. When Correlation Optimizer is turned off, we have four MR jobs.
--- When Correlation Optimizer is turned on, 2 MR jobs will be generated.
--- The first job will evaluate subquery tmp (including subquery a, b, and the 
JoinOperator on a
--- and b).
-EXPLAIN
-SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2))
-FROM (SELECT a.key AS key1, a.cnt AS cnt1, b.key AS key2, b.cnt AS cnt2
-  FROM (SELECT x.key as key, count(x.value) AS cnt FROM src x group by 
x.key) a
-  JOIN (SELECT y.key as key, count(y.value) AS cnt FROM src1 y group by 
y.key) b
-  ON (a.key = b.key)) tmp
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: x
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: key is not null (type: boolean)
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Group By Operator
-aggregations: count(value)
-keys: key (type: string)
-mode: hash
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  value expressions: _col1 (type: bigint)
-  Reduce Operator Tree:
-Group By Operator
-  aggregations: count(VALUE._col0)
-  keys: KEY._col0 (type: string)
-  mode: mergepartial
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
-  File Output Operator
-compressed: false
-table:
-input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-Map Reduce
-  Map Operator Tree:
-  TableScan
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
-  value expressions: _col1 (type: bigint)
-  TableScan
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE 
Column stats: NONE
-  value expressions: _col1 (type: bigint)
-  Reduce Operator Tree:
-Join Operator
-  condition map:
-   Inner Join 0 to 1
-  keys:
-
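The header comment of correlationoptimizer2.q is the clearest description of the feature in this whole batch: both subqueries group by the same key the join then uses, so the correlation optimizer can evaluate the two group-bys and the join in a single shuffle, cutting the plan from four MR jobs to two. A sketch of how to see both plans; hive.optimize.correlation is the conventional switch for this optimizer and is an assumption, since the quoted chunk begins after the settings:

  -- toggle the correlation optimizer and compare the number of MR stages
  set hive.optimize.correlation=true;

  EXPLAIN
  SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2))
  FROM (SELECT a.key AS key1, a.cnt AS cnt1, b.key AS key2, b.cnt AS cnt2
        FROM (SELECT x.key AS key, count(x.value) AS cnt FROM src x GROUP BY x.key) a
        JOIN (SELECT y.key AS key, count(y.value) AS cnt FROM src1 y GROUP BY y.key) b
          ON (a.key = b.key)) tmp;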

[04/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/limit_join_transpose.q.out
--
diff --git a/ql/src/test/results/clientpositive/limit_join_transpose.q.out 
b/ql/src/test/results/clientpositive/limit_join_transpose.q.out
deleted file mode 100644
index 506f155..000
--- a/ql/src/test/results/clientpositive/limit_join_transpose.q.out
+++ /dev/null
@@ -1,1813 +0,0 @@
-PREHOOK: query: explain
-select *
-from src src1 left outer join src src2
-on src1.key = src2.key
-limit 1
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-select *
-from src src1 left outer join src src2
-on src1.key = src2.key
-limit 1
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: src1
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Reduce Output Operator
-key expressions: _col0 (type: string)
-sort order: +
-Map-reduce partition columns: _col0 (type: string)
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-value expressions: _col1 (type: string)
-  TableScan
-alias: src2
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Reduce Output Operator
-key expressions: _col0 (type: string)
-sort order: +
-Map-reduce partition columns: _col0 (type: string)
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-value expressions: _col1 (type: string)
-  Reduce Operator Tree:
-Join Operator
-  condition map:
-   Left Outer Join0 to 1
-  keys:
-0 _col0 (type: string)
-1 _col0 (type: string)
-  outputColumnNames: _col0, _col1, _col2, _col3
-  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE 
Column stats: NONE
-  Limit
-Number of rows: 1
-Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column 
stats: NONE
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE 
Column stats: NONE
-  table:
-  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-Fetch Operator
-  limit: 1
-  Processor Tree:
-ListSink
-
-PREHOOK: query: select *
-from src src1 left outer join src src2
-on src1.key = src2.key
-limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
- A masked pattern was here 
-POSTHOOK: query: select *
-from src src1 left outer join src src2
-on src1.key = src2.key
-limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
- A masked pattern was here 
-0  val_0   0   val_0
-PREHOOK: query: explain
-select *
-from src src1 left outer join src src2
-on src1.key = src2.key
-limit 1
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-select *
-from src src1 left outer join src src2
-on src1.key = src2.key
-limit 1
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: src1
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Reduce Output Operator
-key expressions: _col0 (type: string)
-sort order: +
-Map-reduce partition columns: _col0 (type: string)
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-value expressions: _col1 (type: string)
-  TableSca
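In the first limit_join_transpose.q plan above, the Limit operator only runs after the Join Operator, so both full inputs of the outer join are shuffled before a single row is kept; the later variants of the same query (cut off here) presumably exercise pushing that limit down into the join inputs. A sketch of the toggle; the config name is an assumption based on the test's name, not something shown in the quoted chunk:

  -- assumed switch: allow LIMIT to be transposed below the outer join
  set hive.optimize.limittranspose=true;

  EXPLAIN
  SELECT *
  FROM src src1 LEFT OUTER JOIN src src2
    ON src1.key = src2.key
  LIMIT 1;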

[23/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/column_access_stats.q.out
--
diff --git a/ql/src/test/results/clientpositive/column_access_stats.q.out 
b/ql/src/test/results/clientpositive/column_access_stats.q.out
deleted file mode 100644
index dd8ade2..000
--- a/ql/src/test/results/clientpositive/column_access_stats.q.out
+++ /dev/null
@@ -1,869 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- This test is used for testing the ColumnAccessAnalyzer
-
-CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-PREHOOK: query: CREATE TABLE T4(key STRING, val STRING) PARTITIONED BY (p 
STRING)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T4
-PREHOOK: query: -- Simple select queries
-SELECT key FROM T1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
- A masked pattern was here 
-Table:default@t1
-Columns:key
-
-1
-2
-3
-7
-8
-8
-PREHOOK: query: SELECT key, val FROM T1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
- A masked pattern was here 
-Table:default@t1
-Columns:key,val
-
-1  11
-2  12
-3  13
-7  17
-8  18
-8  28
-PREHOOK: query: SELECT 1 FROM T1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
- A masked pattern was here 
-1
-1
-1
-1
-1
-1
-PREHOOK: query: SELECT key, val from T4 where p=1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t4
- A masked pattern was here 
-Table:default@t4
-Columns:key,p,val
-
-PREHOOK: query: SELECT val FROM T4 where p=1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t4
- A masked pattern was here 
-Table:default@t4
-Columns:p,val
-
-PREHOOK: query: SELECT p, val FROM T4 where p=1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t4
- A masked pattern was here 
-Table:default@t4
-Columns:p,val
-
-PREHOOK: query: -- More complicated select queries
-EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1
-PREHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-Fetch Operator
-  limit: -1
-  Processor Tree:
-TableScan
-  alias: t1
-  Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column 
stats: NONE
-  Select Operator
-expressions: key (type: string)
-outputColumnNames: _col0
-Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column 
stats: NONE
-ListSink
-
-PREHOOK: query: SELECT key FROM (SELECT key, val FROM T1) subq1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
- A masked pattern was here 
-Table:default@t1
-Columns:key
-
-1
-2
-3
-7
-8
-8
-PREHOOK: query: EXPLAIN SELECT k FROM (SELECT key as k, val as v FROM T1) subq1
-PREHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-Fetch Operator
-  limit: -1
-  Processor Tree:
-TableScan
-  alias: t1
-  Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column 
stats: NONE
-  Select Operator
-expressions: key (type: string)
-outputColumnNames: _col0
-Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column 
stats: NONE
-ListSink
-
-PREHOOK: query: SELECT k FROM (SELECT key as k, val as v FROM T1) subq1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
- A masked pattern was here 
-Table:default@t1
-Columns:key
-
-1
-2
-3
-7
-8
-8
-PREHOOK: query: SELECT key + 1 as k FROM T1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
- A masked pattern was here 
-Table:default@t1
-Columns:key
-
-2.0
-3.0
-4.0
-8.0
-9.0
-9.0
-PREHOOK: query: SELECT key + val as k FROM T1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
- A masked pattern was here 
-Table:default@t1
-Columns:key,val
-
-12.0
-14.0
-16.0
-24.0
-26.0
-36.0
-PREHOOK: query: -- Work with union
-EXPLAIN
-SELECT * FROM (
-SELECT key as c FROM T1
- UNION ALL
-SELECT val as c FROM T1
-) subq1
-PREHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: t1
-Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column 
stats: NONE
-  
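The Table:/Columns: blocks interleaved with the results above are what column_access_stats.q is really checking: the ColumnAccessAnalyzer mentioned in its header comment records, per query, which columns of each table were actually read (note how SELECT key + 1 reports only key, while the partitioned query on T4 adds the partition column p). A sketch of how this output is typically produced; both the setting and the hook class below are assumptions about the usual wiring of this test, not something visible in the quoted chunk:

  -- assumed wiring: collect scanned columns and print them after each query
  set hive.stats.collect.scancols=true;
  set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.CheckColumnAccessHook;

  SELECT key + 1 AS k FROM T1;           -- expected to report Columns: key
  SELECT key, val FROM T4 WHERE p = 1;   -- expected to report Columns: key,p,val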

[27/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/cbo_rp_udf_udaf_stats_opt.q.out
--
diff --git a/ql/src/test/results/clientpositive/cbo_rp_udf_udaf_stats_opt.q.out 
b/ql/src/test/results/clientpositive/cbo_rp_udf_udaf_stats_opt.q.out
deleted file mode 100644
index 3a589b4..000
--- a/ql/src/test/results/clientpositive/cbo_rp_udf_udaf_stats_opt.q.out
+++ /dev/null
@@ -1,124 +0,0 @@
-Warning: Value had a \n character in it.
-PREHOOK: query: -- SORT_QUERY_RESULTS
-
--- 8. Test UDF/UDAF
-select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) 
from cbo_t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: -- SORT_QUERY_RESULTS
-
--- 8. Test UDF/UDAF
-select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) 
from cbo_t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-20 18  18  1.0 1   1
-PREHOOK: query: select count(*), count(c_int) as a, sum(c_int), avg(c_int), 
max(c_int), min(c_int), case c_int when 0  then 1 when 1 then 2 else 3 end, 
sum(case c_int when 0  then 1 when 1 then 2 else 3 end) from cbo_t1 group by 
c_int order by a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: select count(*), count(c_int) as a, sum(c_int), avg(c_int), 
max(c_int), min(c_int), case c_int when 0  then 1 when 1 then 2 else 3 end, 
sum(case c_int when 0  then 1 when 1 then 2 else 3 end) from cbo_t1 group by 
c_int order by a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-18 18  18  1.0 1   1   2   36
-2    0    NULL    NULL    NULL    NULL    3    6
-PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as 
b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from 
cbo_t1) cbo_t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as 
b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from 
cbo_t1) cbo_t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-20 1   18  1.0 1   1
-PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as 
b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case 
c_int when 0  then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0  then 
1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1 order by a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as 
b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case 
c_int when 0  then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0  then 
1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1 order by a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-18 1   18  1.0 1   1   2   36
-2    0    NULL    NULL    NULL    NULL    3    6
-PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, 
sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) 
cbo_t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
- A masked pattern was here 
-POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, 
sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) 
cbo_t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
- A masked pattern was here 
-1  20  1   18
-PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct 
c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct 
c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct 
c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct 
c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
- A masked pattern was here 
-1  20  1   1
-PREHOOK: query: select ke

[12/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out 
b/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out
deleted file mode 100644
index 387dfee..000
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out
+++ /dev/null
@@ -1,1750 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
-
-drop table ss
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- SORT_QUERY_RESULTS
-
-drop table ss
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table ss_orc
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table ss_orc
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table ss_part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table ss_part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table ss_part_orc
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table ss_part_orc
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table ss (
-ss_sold_date_sk int,
-ss_net_paid_inc_tax float,
-ss_net_profit float)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@ss
-POSTHOOK: query: create table ss (
-ss_sold_date_sk int,
-ss_net_paid_inc_tax float,
-ss_net_profit float)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ss
-PREHOOK: query: create table ss_part (
-ss_net_paid_inc_tax float,
-ss_net_profit float)
-partitioned by (ss_sold_date_sk int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@ss_part
-POSTHOOK: query: create table ss_part (
-ss_net_paid_inc_tax float,
-ss_net_profit float)
-partitioned by (ss_sold_date_sk int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ss_part
-PREHOOK: query: load data local inpath '../../data/files/dynpart_test.txt' 
overwrite into table ss
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@ss
-POSTHOOK: query: load data local inpath '../../data/files/dynpart_test.txt' 
overwrite into table ss
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@ss
-PREHOOK: query: explain insert overwrite table ss_part partition 
(ss_sold_date_sk)
-select ss_net_paid_inc_tax,
-  ss_net_profit,
-  ss_sold_date_sk
-  from ss
-  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
-  group by ss_sold_date_sk,
-ss_net_paid_inc_tax,
-ss_net_profit
-distribute by ss_sold_date_sk
-PREHOOK: type: QUERY
-POSTHOOK: query: explain insert overwrite table ss_part partition 
(ss_sold_date_sk)
-select ss_net_paid_inc_tax,
-  ss_net_profit,
-  ss_sold_date_sk
-  from ss
-  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
-  group by ss_sold_date_sk,
-ss_net_paid_inc_tax,
-ss_net_profit
-distribute by ss_sold_date_sk
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: ss
-Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 
2452638)) (type: boolean)
-  Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE 
Column stats: NONE
-  Group By Operator
-keys: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: 
float), ss_net_profit (type: float)
-mode: hash
-outputColumnNames: _col0, _col1, _col2
-Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE 
Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: int), _col1 (type: float), 
_col2 (type: float)
-  sort order: +++
-  Map-reduce partition columns: _col0 (type: int)
-  Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE 
Column stats: NONE
-  Reduce Operator Tree:
-Group By Operator
-  keys: KEY._col0 (type: int), KEY._col1 (type: float), KEY._col2 
(type: float)
-  mode: mergepartial
-  outputColumnNames: _col0, _col1, _col2
-  Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column 
stats: NONE
-  Select Operator
-expressions: _col1 (type: float), _col2 (type: float), _col0 
(type: int)
-outputColumnNames: _col0, _col1, _col2
-Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column 
stats: NONE
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE 
Column stats: NONE
-  tabl

[09/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/insert_into_with_schema.q.out
--
diff --git a/ql/src/test/results/clientpositive/insert_into_with_schema.q.out 
b/ql/src/test/results/clientpositive/insert_into_with_schema.q.out
deleted file mode 100644
index 040d118..000
--- a/ql/src/test/results/clientpositive/insert_into_with_schema.q.out
+++ /dev/null
@@ -1,348 +0,0 @@
-PREHOOK: query: -- set of tests HIVE-9481
-
-drop database if exists x314 cascade
-PREHOOK: type: DROPDATABASE
-POSTHOOK: query: -- set of tests HIVE-9481
-
-drop database if exists x314 cascade
-POSTHOOK: type: DROPDATABASE
-PREHOOK: query: create database x314
-PREHOOK: type: CREATEDATABASE
-PREHOOK: Output: database:x314
-POSTHOOK: query: create database x314
-POSTHOOK: type: CREATEDATABASE
-POSTHOOK: Output: database:x314
-PREHOOK: query: use x314
-PREHOOK: type: SWITCHDATABASE
-PREHOOK: Input: database:x314
-POSTHOOK: query: use x314
-POSTHOOK: type: SWITCHDATABASE
-POSTHOOK: Input: database:x314
-PREHOOK: query: create table source(s1 int, s2 int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:x314
-PREHOOK: Output: x314@source
-POSTHOOK: query: create table source(s1 int, s2 int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:x314
-POSTHOOK: Output: x314@source
-PREHOOK: query: create table target1(x int, y int, z int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:x314
-PREHOOK: Output: x314@target1
-POSTHOOK: query: create table target1(x int, y int, z int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:x314
-POSTHOOK: Output: x314@target1
-PREHOOK: query: create table target2(x int, y int, z int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:x314
-PREHOOK: Output: x314@target2
-POSTHOOK: query: create table target2(x int, y int, z int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:x314
-POSTHOOK: Output: x314@target2
-PREHOOK: query: insert into source(s2,s1) values(2,1)
-PREHOOK: type: QUERY
-PREHOOK: Output: x314@source
-POSTHOOK: query: insert into source(s2,s1) values(2,1)
-POSTHOOK: type: QUERY
-POSTHOOK: Output: x314@source
-POSTHOOK: Lineage: source.s1 EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
-POSTHOOK: Lineage: source.s2 EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
-PREHOOK: query: -- expect source to contain 1 row (1,2)
-select * from source
-PREHOOK: type: QUERY
-PREHOOK: Input: x314@source
- A masked pattern was here 
-POSTHOOK: query: -- expect source to contain 1 row (1,2)
-select * from source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: x314@source
- A masked pattern was here 
-1  2
-PREHOOK: query: insert into target1(z,x) select * from source
-PREHOOK: type: QUERY
-PREHOOK: Input: x314@source
-PREHOOK: Output: x314@target1
-POSTHOOK: query: insert into target1(z,x) select * from source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: x314@source
-POSTHOOK: Output: x314@target1
-POSTHOOK: Lineage: target1.x SIMPLE [(source)source.FieldSchema(name:s2, 
type:int, comment:null), ]
-POSTHOOK: Lineage: target1.y SIMPLE []
-POSTHOOK: Lineage: target1.z SIMPLE [(source)source.FieldSchema(name:s1, 
type:int, comment:null), ]
-PREHOOK: query: -- expect target1 to contain 1 row (2,NULL,1)
-select * from target1
-PREHOOK: type: QUERY
-PREHOOK: Input: x314@target1
- A masked pattern was here 
-POSTHOOK: query: -- expect target1 to contain 1 row (2,NULL,1)
-select * from target1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: x314@target1
- A masked pattern was here 
-2  NULL1
-PREHOOK: query: -- note that schema spec for target1 and target2 are different
-from source insert into target1(x,y) select * insert into target2(x,z) select 
s2,s1
-PREHOOK: type: QUERY
-PREHOOK: Input: x314@source
-PREHOOK: Output: x314@target1
-PREHOOK: Output: x314@target2
-POSTHOOK: query: -- note that schema spec for target1 and target2 are different
-from source insert into target1(x,y) select * insert into target2(x,z) select 
s2,s1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: x314@source
-POSTHOOK: Output: x314@target1
-POSTHOOK: Output: x314@target2
-POSTHOOK: Lineage: target1.x SIMPLE [(source)source.FieldSchema(name:s1, 
type:int, comment:null), ]
-POSTHOOK: Lineage: target1.y SIMPLE [(source)source.FieldSchema(name:s2, 
type:int, comment:null), ]
-POSTHOOK: Lineage: target1.z SIMPLE []
-POSTHOOK: Lineage: target2.x SIMPLE [(source)source.FieldSchema(name:s2, 
type:int, comment:null), ]
-POSTHOOK: Lineage: target2.y SIMPLE []
-POSTHOOK: Lineage: target2.z SIMPLE [(source)source.FieldSchema(name:s1, 
type:int, comment:null), ]
-PREHOOK: query: --expect target1 to have 2rows (2,NULL,1), (1,2,NULL)
-select * from target1 order by x,y,z
-PREHOOK: type: QUERY
-PREHOOK: Input: x314@target1
- A masked pa

[44/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/auto_sortmerge_join_6.q.out
--
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_6.q.out 
b/ql/src/test/results/clientpositive/auto_sortmerge_join_6.q.out
deleted file mode 100644
index 7cf01e5..000
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_6.q.out
+++ /dev/null
@@ -1,1353 +0,0 @@
-PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl1
-POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl1
-PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl2
-POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl2
-PREHOOK: query: CREATE TABLE tbl3(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl3
-POSTHOOK: query: CREATE TABLE tbl3(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl3
-PREHOOK: query: CREATE TABLE tbl4(key int, value string) CLUSTERED BY (value) 
SORTED BY (value) INTO 2 BUCKETS
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl4
-POSTHOOK: query: CREATE TABLE tbl4(key int, value string) CLUSTERED BY (value) 
SORTED BY (value) INTO 2 BUCKETS
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl4
-PREHOOK: query: insert overwrite table tbl1 select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tbl1
-POSTHOOK: query: insert overwrite table tbl1 select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tbl1
-POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
-PREHOOK: query: insert overwrite table tbl2 select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tbl2
-POSTHOOK: query: insert overwrite table tbl2 select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tbl2
-POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
-PREHOOK: query: insert overwrite table tbl3 select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tbl3
-POSTHOOK: query: insert overwrite table tbl3 select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tbl3
-POSTHOOK: Lineage: tbl3.key EXPRESSION [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: tbl3.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
-PREHOOK: query: insert overwrite table tbl4 select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tbl4
-POSTHOOK: query: insert overwrite table tbl4 select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tbl4
-POSTHOOK: Lineage: tbl4.key EXPRESSION [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: tbl4.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
-PREHOOK: query: -- A SMB join is being followed by a regular join on a 
non-bucketed table on a different key
-
--- Three tests below are all the same query with different alias, which 
changes dispatch order of GenMapRedWalker
--- This is dependent to iteration order of HashMap, so can be meaningless in 
non-sun jdk
--- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10]
--- c = TS[1]-RS[7]-JOIN[8]
--- a = TS[2]-MAPJOIN[11]
-explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on 
c.value = a.value
-PREHOOK: type: QUERY
-POSTHOOK: query: -- A SMB join is being followed by a regular join on a 
non-bucketed table on a different key
-
--- Three tests below are all the same query with different alias, which 
changes dispatch order of GenMapRedWalker
--- This is dependent to iteration order of HashMap, so c

[07/54] [partial] hive git commit: HIVE-17550: Remove unreferenced q.out-s (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-09-21 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/join_acid_non_acid.q.out
--
diff --git a/ql/src/test/results/clientpositive/join_acid_non_acid.q.out 
b/ql/src/test/results/clientpositive/join_acid_non_acid.q.out
deleted file mode 100644
index f1c93cd0..000
--- a/ql/src/test/results/clientpositive/join_acid_non_acid.q.out
+++ /dev/null
@@ -1,54 +0,0 @@
-PREHOOK: query: CREATE TABLE orc_update_table (k1 INT, f1 STRING, op_code 
STRING)
-CLUSTERED BY (k1) INTO 2 BUCKETS
-STORED AS ORC TBLPROPERTIES("transactional"="true")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_update_table
-POSTHOOK: query: CREATE TABLE orc_update_table (k1 INT, f1 STRING, op_code 
STRING)
-CLUSTERED BY (k1) INTO 2 BUCKETS
-STORED AS ORC TBLPROPERTIES("transactional"="true")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_update_table
-PREHOOK: query: INSERT INTO TABLE orc_update_table VALUES (1, 'a', 'I')
-PREHOOK: type: QUERY
-PREHOOK: Output: default@orc_update_table
-POSTHOOK: query: INSERT INTO TABLE orc_update_table VALUES (1, 'a', 'I')
-POSTHOOK: type: QUERY
-POSTHOOK: Output: default@orc_update_table
-POSTHOOK: Lineage: orc_update_table.f1 SIMPLE 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
-POSTHOOK: Lineage: orc_update_table.k1 EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
-POSTHOOK: Lineage: orc_update_table.op_code SIMPLE 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, 
type:string, comment:), ]
-PREHOOK: query: CREATE TABLE orc_table (k1 INT, f1 STRING)
-CLUSTERED BY (k1) SORTED BY (k1) INTO 2 BUCKETS
-STORED AS ORC
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_table
-POSTHOOK: query: CREATE TABLE orc_table (k1 INT, f1 STRING)
-CLUSTERED BY (k1) SORTED BY (k1) INTO 2 BUCKETS
-STORED AS ORC
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_table
-PREHOOK: query: INSERT OVERWRITE TABLE orc_table VALUES (1, 'x')
-PREHOOK: type: QUERY
-PREHOOK: Output: default@orc_table
-POSTHOOK: query: INSERT OVERWRITE TABLE orc_table VALUES (1, 'x')
-POSTHOOK: type: QUERY
-POSTHOOK: Output: default@orc_table
-POSTHOOK: Lineage: orc_table.f1 SIMPLE 
[(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
-POSTHOOK: Lineage: orc_table.k1 EXPRESSION 
[(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
-PREHOOK: query: SELECT t1.*, t2.* FROM orc_table t1
-JOIN orc_update_table t2 ON t1.k1=t2.k1 ORDER BY t1.k1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_table
-PREHOOK: Input: default@orc_update_table
- A masked pattern was here 
-POSTHOOK: query: SELECT t1.*, t2.* FROM orc_table t1
-JOIN orc_update_table t2 ON t1.k1=t2.k1 ORDER BY t1.k1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_table
-POSTHOOK: Input: default@orc_update_table
- A masked pattern was here 
-1  x   1   a   I

http://git-wip-us.apache.org/repos/asf/hive/blob/6f5c1135/ql/src/test/results/clientpositive/join_filters.q.out
--
diff --git a/ql/src/test/results/clientpositive/join_filters.q.out 
b/ql/src/test/results/clientpositive/join_filters.q.out
deleted file mode 100644
index fa0584d..000
--- a/ql/src/test/results/clientpositive/join_filters.q.out
+++ /dev/null
@@ -1,1484 +0,0 @@
-PREHOOK: query: -- SORT_AND_HASH_QUERY_RESULTS
-
-CREATE TABLE myinput1(key int, value int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@myinput1
-POSTHOOK: query: -- SORT_AND_HASH_QUERY_RESULTS
-
-CREATE TABLE myinput1(key int, value int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@myinput1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE 
myinput1
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@myinput1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE 
myinput1
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@myinput1
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Stage-1:MAPRED' is a cross product
-PREHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b on a.key > 40 AND 
a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = 
b.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@myinput1
- A masked pattern was here 
-POSTHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b on a.key > 40 AND 
a.value > 50 AND a.key = a.value AND b.key > 40 

hive git commit: HIVE-17313: Potentially possible 'case fall through' in the ObjectInspectorConverters (Oleg Danilov via Zoltan Haindrich)

2017-09-15 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 9329cd909 -> 649922d4f


HIVE-17313: Potentially possible 'case fall through' in the 
ObjectInspectorConverters (Oleg Danilov via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/649922d4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/649922d4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/649922d4

Branch: refs/heads/master
Commit: 649922d4f71a6ee81a6ca0e13cb6b7d59bbc166f
Parents: 9329cd9
Author: Oleg Danilov 
Authored: Fri Sep 15 09:23:22 2017 +0200
Committer: Zoltan Haindrich 
Committed: Fri Sep 15 09:23:22 2017 +0200

--
 .../serde2/objectinspector/ObjectInspectorConverters.java| 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/649922d4/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
--
diff --git 
a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
 
b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
index 7921de8..cdf5ed4 100644
--- 
a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
+++ 
b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java
@@ -111,6 +111,7 @@ public final class ObjectInspectorConverters {
 return new PrimitiveObjectInspectorConverter.StringConverter(
 inputOI);
   }
+  break;
 case CHAR:
   return new PrimitiveObjectInspectorConverter.HiveCharConverter(
   inputOI,
@@ -147,11 +148,10 @@ public final class ObjectInspectorConverters {
   return new PrimitiveObjectInspectorConverter.HiveDecimalConverter(
   inputOI,
   (SettableHiveDecimalObjectInspector) outputOI);
-default:
-  throw new RuntimeException("Hive internal error: conversion of "
-  + inputOI.getTypeName() + " to " + outputOI.getTypeName()
-  + " not supported yet.");
 }
+throw new RuntimeException("Hive internal error: conversion of "
++ inputOI.getTypeName() + " to " + outputOI.getTypeName()
++ " not supported yet.");
   }
 
   /**
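
For readers skimming the hunks above: the hazard being removed is a plain switch fall-through. The STRING case only returned from inside an inner block, so whenever that condition was false control fell straight into the CHAR case and silently handed back the wrong converter; the second hunk moves the "not supported" exception out of the default case so anything that now breaks out of the switch fails loudly instead. A minimal standalone sketch of the pattern (illustrative names only, not Hive code):

  // FallThroughDemo.java -- standalone illustration of the bug class fixed here.
  public class FallThroughDemo {
    enum Category { STRING, CHAR, OTHER }

    static String pickConverter(Category c, boolean settableOutput) {
      switch (c) {
        case STRING:
          if (settableOutput) {
            return "StringConverter";
          }
          break; // without this break, STRING with settableOutput == false falls into CHAR below
        case CHAR:
          return "HiveCharConverter";
        default:
          break;
      }
      // mirroring the patch: unsupported conversions fail loudly after the switch
      throw new RuntimeException("conversion of " + c + " not supported yet.");
    }

    public static void main(String[] args) {
      System.out.println(pickConverter(Category.CHAR, true));    // HiveCharConverter
      System.out.println(pickConverter(Category.STRING, false)); // throws instead of silently returning HiveCharConverter
    }
  }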



hive git commit: HIVE-17314: LazySimpleSerializeWrite.writeString() contains if with an empty body (Oleg Danilov via Zoltan Haindrich)

2017-09-11 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 92f9d8fb4 -> 335474210


HIVE-17314: LazySimpleSerializeWrite.writeString() contains if with an empty 
body (Oleg Danilov via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/33547421
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/33547421
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/33547421

Branch: refs/heads/master
Commit: 33547421050b6227e51640e85a5152919dad0f23
Parents: 92f9d8f
Author: Oleg Danilov 
Authored: Mon Sep 11 11:44:37 2017 +0200
Committer: Zoltan Haindrich 
Committed: Mon Sep 11 11:44:37 2017 +0200

--
 .../hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java   | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/33547421/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
--
diff --git 
a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
 
b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
index ef77daf..3790d3c 100644
--- 
a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
+++ 
b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleSerializeWrite.java
@@ -224,15 +224,13 @@ public final class LazySimpleSerializeWrite implements 
SerializeWrite {
 
   /*
* STRING.
-   * 
+   *
* Can be used to write CHAR and VARCHAR when the caller takes 
responsibility for
* truncation/padding issues.
*/
   @Override
   public void writeString(byte[] v) throws IOException  {
 beginPrimitive();
-if (v.equals(nullSequenceBytes)) {
-}
 LazyUtils.writeEscaped(output, v, 0, v.length, isEscaped, escapeChar,
 needsEscape);
 finishPrimitive();
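
Beyond being empty, the deleted check was a no-op for a second reason: v is a byte[] (see the writeString signature above), and arrays do not override Object.equals, so v.equals(nullSequenceBytes) compares references rather than contents and could never match an equal-but-distinct null sequence anyway. A small JDK-only sketch of that pitfall:

  // ArrayEqualsDemo.java -- why equals() on a byte[] never does what a content check would need.
  import java.util.Arrays;

  public class ArrayEqualsDemo {
    public static void main(String[] args) {
      byte[] a = {0x5c, 0x4e};                 // "\N"
      byte[] b = {0x5c, 0x4e};                 // same bytes, different array object
      System.out.println(a.equals(b));         // false: arrays inherit Object.equals, i.e. reference equality
      System.out.println(a.equals(a));         // true only because it is the same reference
      System.out.println(Arrays.equals(a, b)); // true: content comparison requires java.util.Arrays.equals
    }
  }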



hive git commit: HIVE-17344: LocalCache element memory usage is not calculated properly. (Janos Gub via Zoltan Haindrich, reviewed by Sergey Shelukhin)

2017-09-11 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 8deb77940 -> 076bd7716


HIVE-17344: LocalCache element memory usage is not calculated properly. (Janos 
Gub via Zoltan Haindrich, reviewed by Sergey Shelukhin)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/076bd771
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/076bd771
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/076bd771

Branch: refs/heads/master
Commit: 076bd7716e705132521b267c748ed781b6ec12cb
Parents: 8deb779
Author: Janos Gub 
Authored: Mon Sep 11 09:30:49 2017 +0200
Committer: Zoltan Haindrich 
Committed: Mon Sep 11 09:32:52 2017 +0200

--
 ql/src/java/org/apache/hadoop/hive/ql/io/orc/LocalCache.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/076bd771/ql/src/java/org/apache/hadoop/hive/ql/io/orc/LocalCache.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/LocalCache.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/LocalCache.java
index b375aea..e28eb34 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/LocalCache.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/LocalCache.java
@@ -48,7 +48,7 @@ class LocalCache implements OrcInputFormat.FooterCache {
 public long fileLength, fileModTime;
 
 public int getMemoryUsage() {
-  return bb.remaining() + 100; // 100 is for 2 longs, BB and java 
overheads (semi-arbitrary).
+  return bb.capacity() + 100; // 100 is for 2 longs, BB and java overheads 
(semi-arbitrary).
 }
   }
 
@@ -78,8 +78,12 @@ class LocalCache implements OrcInputFormat.FooterCache {
   }
 
   public void put(Path path, OrcTail tail) {
+ByteBuffer bb = tail.getSerializedTail();
+if (bb.capacity() != bb.remaining()) {
+  throw new RuntimeException("Bytebuffer allocated for path: " + path + " 
has remaining: " + bb.remaining() + " != capacity: " + bb.capacity());
+}
 cache.put(path, new TailAndFileData(tail.getFileTail().getFileLength(),
-tail.getFileModificationTime(), tail.getSerializedTail().duplicate()));
+tail.getFileModificationTime(), bb.duplicate()));
   }
 
   @Override
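
The accounting change above rests on the java.nio.ByteBuffer contract: capacity() is the size of the backing storage the cache actually holds, while remaining() is just limit - position and shrinks as the buffer is consumed, so it can badly understate the footprint. A small sketch of the distinction (plain JDK, no Hive types):

  // ByteBufferAccountingDemo.java -- remaining() vs capacity(), the distinction behind this fix.
  import java.nio.ByteBuffer;

  public class ByteBufferAccountingDemo {
    public static void main(String[] args) {
      ByteBuffer bb = ByteBuffer.allocate(1024); // capacity is fixed at 1024 for the buffer's lifetime
      bb.put(new byte[100]);                     // position = 100, limit = 1024
      System.out.println(bb.capacity());         // 1024 -> the memory the cache really holds on to
      System.out.println(bb.remaining());        // 924  -> limit - position, already misleading for accounting
      bb.flip();                                 // typical read-ready state: position = 0, limit = 100
      System.out.println(bb.remaining());        // 100  -> shrinks further as a reader consumes the buffer
    }
  }

The new check in put() guards the same assumption from the other side: if a serialized tail ever arrived with capacity != remaining, the cheap capacity-based estimate would no longer describe the duplicated buffer, so the cache fails fast rather than mis-accounting.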



[2/5] hive git commit: HIVE-17375: stddev_samp, var_samp standard compliance (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-08-28 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/dd04a92f/ql/src/test/results/clientpositive/vectorization_15.q.out
--
diff --git a/ql/src/test/results/clientpositive/vectorization_15.q.out 
b/ql/src/test/results/clientpositive/vectorization_15.q.out
index 8f0a879..8eff856 100644
--- a/ql/src/test/results/clientpositive/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_15.q.out
@@ -278,50 +278,50 @@ ORDER BY cfloat, cboolean1, cdouble, cstring1, ctinyint, 
cint, ctimestamp1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
  A masked pattern was here 
--48.0  NULL-7196.0 NULL-48 NULL1969-12-31 16:00:06.337 0.0 
NULL-7196.0 -572463.388 33.00.0 0.0 -23.0   48  NULL
NULL-23 NULLNULL
--51.0  NULL-200.0  NULL-51 NULL1969-12-31 15:59:55.423 0.0 
NULL-200.0  -15910.5999 33.00.0 0.0 -23.0   51  
NULLNULL-23 NULLNULL
--51.0  false   NULL10  -51 1058319346  1969-12-31 16:00:08.451 
0.0 -1058319372.28  NULLNULL33.00.0 0.0 NULL51  
0.0 1.05831942E9-23 1058319372.28   0.0
--51.0  false   NULL10TYIE5S35U6dj3N-51 -469581869  
1969-12-31 16:00:08.451 0.0 469581842.72NULLNULL33.00.0 
0.0 NULL51  0.0 -4.69581792E8   -23 -469581842.72   0.0
--51.0  false   NULL1Lh6Uoq3WhNtOqQHu7WN7U  -51 -352637533  
1969-12-31 16:00:08.451 0.0 352637506.72NULLNULL33.00.0 
0.0 NULL51  0.0 -3.52637472E8   -23 -352637506.72   0.0
--51.0  trueNULL04Y1mA17-51 -114647521  1969-12-31 
16:00:08.451 0.0 114647494.72NULLNULL33.00.0 0.0 
NULL51  0.0 -1.14647472E8   -23 -114647494.72   0.0
--51.0  trueNULL10Wu570aLPO0p02P17FeH   -51 405338893   
1969-12-31 16:00:08.451 0.0 -405338919.28   NULLNULL33.00.0 
0.0 NULL51  0.0 4.05338944E8-23 405338919.280.0
--51.0  trueNULL3cQp060 -51 -226923315  1969-12-31 16:00:08.451 
0.0 226923288.72NULLNULL33.00.0 0.0 NULL51  
0.0 -2.26923264E8   -23 -226923288.72   0.0
--51.0  trueNULL8EPG0Xi307qd-51 -328662044  1969-12-31 
16:00:08.451 0.0 328662017.72NULLNULL33.00.0 0.0 
NULL51  0.0 -3.28661984E8   -23 -328662017.72   0.0
--51.0  trueNULL8iHtdkJ6d   -51 1006818344  1969-12-31 
16:00:08.451 0.0 -1006818370.28  NULLNULL33.00.0 0.0 
NULL51  0.0 1.00681843E9-23 1006818370.28   0.0
--51.0  trueNULLQiOcvR0kt6r7f0R7fiPxQTCU-51 266531954   
1969-12-31 16:00:08.451 0.0 -266531980.28   NULLNULL33.00.0 
0.0 NULL51  0.0 2.66532E8   -23 266531980.280.0
--51.0  trueNULLYbpj38RTTYl7CnJXPNx1g4C -51 -370919370  
1969-12-31 16:00:08.451 0.0 370919343.72NULLNULL33.00.0 
0.0 NULL51  0.0 -3.70919296E8   -23 -370919343.72   0.0
--6.0   NULL-200.0  NULL-6  NULL1969-12-31 15:59:56.094 0.0 
NULL-200.0  -15910.5999 3.0 0.0 0.0 -23.0   6   
NULLNULL-5  NULLNULL
--62.0  NULL15601.0 NULL-62 NULL1969-12-31 15:59:56.527 0.0 
NULL15601.0 1241106.353 33.00.0 0.0 -23.0   62  NULL
NULL-23 NULLNULL
-11.0   false   NULL10pO8p1LNx4Y11  271296824   1969-12-31 
16:00:02.351 0.0 -271296850.28   NULLNULL0.0 0.0 0.0 
NULL-11 0.0 2.71296832E8-1  271296850.280.0
-11.0   false   NULL1H6wGP  11  -560827082  1969-12-31 16:00:02.351 
0.0 560827055.72NULLNULL0.0 0.0 0.0 NULL-11 
0.0 -5.6082707E8-1  -560827055.72   0.0
-11.0   false   NULL2a7V63IL7jK3o   11  -325931647  1969-12-31 
16:00:02.351 0.0 325931620.72NULLNULL0.0 0.0 0.0 
NULL-11 0.0 -3.25931648E8   -1  -325931620.72   0.0
-11.0   trueNULL10  11  923658131969-12-31 16:00:02.351 
0.0 -92365839.28NULLNULL0.0 0.0 0.0 NULL-11 
0.0 9.2365808E7 -1  92365839.28 0.0
-21.0   NULL15601.0 NULL21  NULL1969-12-31 15:59:56.527 0.0 
NULL15601.0 1241106.353 12.00.0 0.0 -23.0   -21 NULL
NULL-2  NULLNULL
-32.0   NULL-200.0  NULL32  NULL1969-12-31 16:00:02.445 0.0 
NULL-200.0  -15910.5999 1.0 0.0 0.0 -23.0   -32 
NULLNULL-23 NULLNULL
-36.0   NULL-200.0  NULL

[1/5] hive git commit: HIVE-17375: stddev_samp, var_samp standard compliance (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-08-28 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master e352ef409 -> dd04a92f7


http://git-wip-us.apache.org/repos/asf/hive/blob/dd04a92f/ql/src/test/results/clientpositive/vectorization_9.q.out
--
diff --git a/ql/src/test/results/clientpositive/vectorization_9.q.out 
b/ql/src/test/results/clientpositive/vectorization_9.q.out
index 930b476..7af0bbd 100644
--- a/ql/src/test/results/clientpositive/vectorization_9.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_9.q.out
@@ -474,168 +474,168 @@ N6BMOr83ecL NULL1969-12-31 16:00:08.451 NULL
NULL0   NULLNULLNULLNULLNULL0.
 N6Dh6XreCWb0aA4nmDnFOO NULL1969-12-31 16:00:02.351 NULLNULL0   
NULLNULLNULLNULLNULL0.00NULL
 N8222wByj  NULL1969-12-31 16:00:08.451 NULLNULL0   NULL
NULLNULLNULLNULL0.00NULL
 NABd3KhjjaVfcj2Q7SJ46  NULL1969-12-31 16:00:02.351 NULLNULL0   
NULLNULLNULLNULLNULL0.00NULL
-NULL   15601.0 1969-12-31 15:59:43.919 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:44.07  -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:44.179 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:44.394 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:44.477 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:44.568 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:44.571 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:44.708 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:44.782 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:45.816 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:46.114 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:46.82  -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:46.953 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:47.134 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:47.406 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:47.511 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:47.616 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:47.975 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:48.052 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:48.299 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:48.429 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:48.552 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:48.679 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:48.943 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070356964297   -0.719942   0.0
-NULL   15601.0 1969-12-31 15:59:49.331 -9747614.5639   9747614.56391   
0.0 -0.00.0 15601.0 625.8070

[3/5] hive git commit: HIVE-17375: stddev_samp, var_samp standard compliance (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-08-28 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/dd04a92f/ql/src/test/results/clientpositive/vectorization_14.q.out
--
diff --git a/ql/src/test/results/clientpositive/vectorization_14.q.out 
b/ql/src/test/results/clientpositive/vectorization_14.q.out
index c6bd7cf..536a2a1 100644
--- a/ql/src/test/results/clientpositive/vectorization_14.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_14.q.out
@@ -290,625 +290,625 @@ ORDER BY cstring1, cfloat, cdouble, ctimestamp1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
  A masked pattern was here 
-1969-12-31 15:59:55.49150.0NULLNULL-200.0  -226.28 226.28  
0.0 -1314.0 50.0-50.0   -50.0   22.238820638820638  0.0 1   
-22.238820638820638 NULL250.0   0.0 0.0 0.0 -250.0
-1969-12-31 15:59:55.50831.0NULLNULL-200.0  -226.28 226.28  
0.0 -814.68 31.0-31.0   -31.0   22.238820638820638  0.0 1   
-22.238820638820638 NULL231.0   0.0 0.0 0.0 -231.0
-1969-12-31 15:59:55.747-3.0NULLNULL-200.0  -226.28 226.28  
0.0 78.840004   -3.03.0 3.0 22.238820638820638  0.0 
1   -22.238820638820638 NULL197.0   0.0 0.0 0.0 -197.0
-1969-12-31 15:59:55.796-43.0   NULLNULL-200.0  -226.28 226.28  
0.0 1130.04 -43.0   43.043.022.238820638820638  0.0 1   
-22.238820638820638 NULL157.0   0.0 0.0 0.0 -157.0
-1969-12-31 15:59:55.79919.0NULLNULL-200.0  -226.28 226.28  
0.0 -499.32 19.0-19.0   -19.0   22.238820638820638  0.0 1   
-22.238820638820638 NULL219.0   0.0 0.0 0.0 -219.0
-1969-12-31 15:59:55.9824.0 NULLNULL-200.0  -226.28 226.28  
0.0 -105.12 4.0 -4.0-4.022.238820638820638  0.0 1   
-22.238820638820638 NULL204.0   0.0 0.0 0.0 -204.0
-1969-12-31 15:59:56.09919.0NULLNULL-200.0  -226.28 226.28  
0.0 -499.32 19.0-19.0   -19.0   22.238820638820638  0.0 1   
-22.238820638820638 NULL219.0   0.0 0.0 0.0 -219.0
-1969-12-31 15:59:56.13126.0NULLNULL-200.0  -226.28 226.28  
0.0 -683.28 26.0-26.0   -26.0   22.238820638820638  0.0 1   
-22.238820638820638 NULL226.0   0.0 0.0 0.0 -226.0
-1969-12-31 15:59:56.14 61.0NULLNULL-200.0  -226.28 226.28  0.0 
-1603.0801  61.0-61.0   -61.0   22.238820638820638  0.0 1   
-22.238820638820638 NULL261.0   0.0 0.0 0.0 -261.0
-1969-12-31 15:59:56.159-49.0   NULLNULL-200.0  -226.28 226.28  
0.0 1287.7201   -49.0   49.049.022.238820638820638  0.0 
1   -22.238820638820638 NULL151.0   0.0 0.0 0.0 -151.0
-1969-12-31 15:59:56.174-36.0   NULLNULL-200.0  -226.28 226.28  
0.0 946.08  -36.0   36.036.022.238820638820638  0.0 1   
-22.238820638820638 NULL164.0   0.0 0.0 0.0 -164.0
-1969-12-31 15:59:56.197-42.0   NULLNULL-200.0  -226.28 226.28  
0.0 1103.76 -42.0   42.042.022.238820638820638  0.0 1   
-22.238820638820638 NULL158.0   0.0 0.0 0.0 -158.0
-1969-12-31 15:59:56.218-4.0NULLNULL-200.0  -226.28 226.28  
0.0 105.12  -4.04.0 4.0 22.238820638820638  0.0 1   
-22.238820638820638 NULL196.0   0.0 0.0 0.0 -196.0
-1969-12-31 15:59:56.276-60.0   NULLNULL-200.0  -226.28 226.28  
0.0 1576.8  -60.0   60.060.022.238820638820638  0.0 1   
-22.238820638820638 NULL140.0   0.0 0.0 0.0 -140.0
-1969-12-31 15:59:56.319-5.0NULLNULL-200.0  -226.28 226.28  
0.0 131.40001   -5.05.0 5.0 22.238820638820638  0.0 
1   -22.238820638820638 NULL195.0   0.0 0.0 0.0 -195.0
-1969-12-31 15:59:56.34554.0NULLNULL-200.0  -226.28 226.28  
0.0 -1419.1254.0-54.0   -54.0   22.238820638820638  0.0 
1   -22.238820638820638 NULL254.0   0.0 0.0 0.0 -254.0
-1969-12-31 15:59:56.414-23.0   NULLNULL-200.0  -226.28 226.28  
0.0 604.44  -23.0   23.023.022.238820638820638  0.0 1   
-22.238820638820638 NULL177.0   0.0 0.0 0.0 -177.0
-1969-12-31 15:59:56.436-33.0   NULLNULL-200.0  -226.28 226.28  
0.0 867.24005   -33.0   33.033.022.238820638820638  0.0 
1   -22.238820638820638 NULL167.0   0.0 0.0 0.0 -167.0
-1969-12-31 15:59:56.477-47.0   NULLNULL-200.0  -226.28 226.28  
0.0 1235.16 -47.0   47.047.0 

[5/5] hive git commit: HIVE-17375: stddev_samp, var_samp standard compliance (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-08-28 Thread kgyrtkirk
HIVE-17375: stddev_samp,var_samp standard compliance (Zoltan Haindrich, 
reviewed by Ashutosh Chauhan)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dd04a92f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dd04a92f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dd04a92f

Branch: refs/heads/master
Commit: dd04a92f7178df2ef26ca0b76bd2985c34a7dbaf
Parents: e352ef4
Author: Zoltan Haindrich 
Authored: Mon Aug 28 22:19:53 2017 +0200
Committer: Zoltan Haindrich 
Committed: Mon Aug 28 23:06:46 2017 +0200

--
 .../ql/udf/generic/GenericUDAFStdSample.java|   13 +-
 .../udf/generic/GenericUDAFVarianceSample.java  |   13 +-
 .../queries/clientpositive/udf_stddev_samp.q|9 +
 .../test/queries/clientpositive/udf_var_samp.q  |9 +
 .../results/clientpositive/decimal_udf.q.out|   24 +-
 .../clientpositive/llap/vectorization_15.q.out  |   94 +-
 .../clientpositive/spark/vectorization_15.q.out |   94 +-
 .../clientpositive/udf_stddev_samp.q.out|   66 +-
 .../results/clientpositive/udf_var_samp.q.out   |   66 +-
 .../clientpositive/vectorization_12.q.out   | 1064 +++
 .../clientpositive/vectorization_14.q.out   | 1244 +-
 .../clientpositive/vectorization_15.q.out   |   94 +-
 .../clientpositive/vectorization_16.q.out   |  322 ++---
 .../clientpositive/vectorization_9.q.out|  322 ++---
 14 files changed, 1783 insertions(+), 1651 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/dd04a92f/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStdSample.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStdSample.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStdSample.java
index e85046c..e032982 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStdSample.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStdSample.java
@@ -31,7 +31,10 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
  *
  */
 @Description(name = "stddev_samp",
-value = "_FUNC_(x) - Returns the sample standard deviation of a set of 
numbers")
+value = "_FUNC_(x) - Returns the sample standard deviation of a set of 
numbers.\n"
+  + "If applied to an empty set: NULL is returned.\n"
+  + "If applied to a set with a single element: NULL is returned.\n"
+  + "Otherwise it computes: sqrt(var_samp(x))")
 public class GenericUDAFStdSample extends GenericUDAFVariance {
 
   @Override
@@ -78,14 +81,10 @@ public class GenericUDAFStdSample extends 
GenericUDAFVariance {
 public Object terminate(AggregationBuffer agg) throws HiveException {
   StdAgg myagg = (StdAgg) agg;
 
-  if (myagg.count == 0) { // SQL standard - return null for zero elements
+  if (myagg.count <= 1) { // SQL standard - return null for zero or one 
elements
 return null;
   } else {
-if (myagg.count > 1) {
-  getResult().set(Math.sqrt(myagg.variance / (myagg.count - 1)));
-} else { // for one element the variance is always 0
-  getResult().set(0);
-}
+getResult().set(Math.sqrt(myagg.variance / (myagg.count - 1)));
 return getResult();
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/dd04a92f/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVarianceSample.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVarianceSample.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVarianceSample.java
index ab863be..8815086 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVarianceSample.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVarianceSample.java
@@ -31,7 +31,10 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
  *
  */
 @Description(name = "var_samp",
-value = "_FUNC_(x) - Returns the sample variance of a set of numbers")
+value = "_FUNC_(x) - Returns the sample variance of a set of numbers.\n"
+  + "If applied to an empty set: NULL is returned.\n"
+  + "If applied to a set with a single element: NULL is returned.\n"
+  + "Otherwise it computes: (S2-S1*S1/N)/(N-1)")
 public class GenericUDAFVarianceSample extends GenericUDAFVariance {
 
   @Override
@@ -78,14 +81,10 @@ public class GenericUDAFVarianceSample extends 
GenericUDAFVariance {
 public Object terminate(AggregationBuffer agg) throws HiveException {
   StdAgg myagg = (StdAgg) agg;
 
-  if (myagg.count == 0) { // SQL standard - return null for zero elements
+
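
Net effect of the two terminate() changes: both sample aggregates now return NULL for zero or one input rows, as the SQL standard requires, instead of the previous behavior of returning 0 for a single row; for two or more rows the formulas quoted in the @Description strings apply. A standalone sketch of that contract (illustrative code, not the Hive UDAF itself):

  // SampleVarianceDemo.java -- the SQL-standard behavior adopted here:
  // var_samp over 0 or 1 rows is NULL, otherwise (S2 - S1*S1/N) / (N - 1); stddev_samp is its square root.
  public class SampleVarianceDemo {
    static Double varSamp(double[] xs) {
      long n = xs.length;
      if (n <= 1) {
        return null; // SQL standard: undefined for empty or single-element sets
      }
      double s1 = 0, s2 = 0;
      for (double x : xs) { s1 += x; s2 += x * x; }
      return (s2 - s1 * s1 / n) / (n - 1);
    }

    public static void main(String[] args) {
      System.out.println(varSamp(new double[] {}));              // null
      System.out.println(varSamp(new double[] {42.0}));          // null (the old code returned 0 here)
      System.out.println(varSamp(new double[] {1.0, 2.0, 4.0})); // 2.333...; stddev_samp would be sqrt of this
    }
  }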

[4/5] hive git commit: HIVE-17375: stddev_samp, var_samp standard compliance (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-08-28 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/dd04a92f/ql/src/test/results/clientpositive/vectorization_12.q.out
--
diff --git a/ql/src/test/results/clientpositive/vectorization_12.q.out 
b/ql/src/test/results/clientpositive/vectorization_12.q.out
index df3f047..63ea984 100644
--- a/ql/src/test/results/clientpositive/vectorization_12.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_12.q.out
@@ -284,535 +284,535 @@ ORDER BY ctimestamp1, cdouble, cbigint, cstring1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
  A masked pattern was here 
--1645852809false   DUSKf88aNULL6764.0  -4.3506048E7
1645852809  1   -1645852809 0.0 6764.0  -6764.0 6764.0  
4.3506048E7 -1645858447.15  -1645852809 -1.554726368159204E-4   6764.0  
6764.0  4.3512812E7 0.0
--1645852809false   G7Ve8Px6a7J0DafBodF8JMmaNULL-1291.0 
8303712.0   1645852809  1   -1645852809 0.0 -1291.0 1291.0  
-1291.0 -8303712.0  -1645858447.15  -1645852809 -1.554726368159204E-4   
-1291.0 -1291.0 -8305003.0  0.0
--1645852809false   K7tGy146ydkaNULL-1236.0 7949952.0   
1645852809  1   -1645852809 0.0 -1236.0 1236.0  -1236.0 
-7949952.0  -1645858447.15  -1645852809 -1.554726368159204E-4   -1236.0 
-1236.0 -7951188.0  0.0
--1645852809false   OHG2wWD83Ba NULL6914.0  -4.4470848E7
1645852809  1   -1645852809 0.0 6914.0  -6914.0 6914.0  
4.4470848E7 -1645858447.15  -1645852809 -1.554726368159204E-4   6914.0  
6914.0  4.4477762E7 0.0
--1645852809false   S7UM6KgdxTofi6rwXBFa2a  NULL12520.0 -8.052864E7 
1645852809  1   -1645852809 0.0 12520.0 -12520.012520.0 
8.052864E7  -1645858447.15  -1645852809 -1.554726368159204E-4   12520.0 
12520.0 8.054116E7  0.0
--1645852809false   eNsh5tYaNULLNULLNULL1645852809  
1   -1645852809 0.0 NULLNULLNULLNULL-1645858447.15  
-1645852809 NULLNULLNULLNULLNULL
--1645852809false   iS4P5128HY44wa  NULL3890.0  -2.502048E7 
1645852809  1   -1645852809 0.0 3890.0  -3890.0 3890.0  
2.502048E7  -1645858447.15  -1645852809 -1.554726368159204E-4   3890.0  
3890.0  2.502437E7  0.0
--1645852809false   kro4Xu41bB7hiFa NULL-3277.0 2.1077664E7 
1645852809  1   -1645852809 0.0 -3277.0 3277.0  -3277.0 
-2.1077664E7-1645858447.15  -1645852809 -1.554726368159204E-4   -3277.0 
-3277.0 -2.1080941E70.0
--1645852809false   lJ63qx87BLmdMfa NULL11619.0 -7.4733408E7
1645852809  1   -1645852809 0.0 11619.0 -11619.011619.0 
7.4733408E7 -1645858447.15  -1645852809 -1.554726368159204E-4   11619.0 
11619.0 7.4745027E7 0.0
--1645852809true4gBPJa  NULL13167.0 -8.4690144E71645852809  
1   -1645852809 0.0 13167.0 -13167.013167.0 8.4690144E7 
-1645858447.15  -1645852809 -1.554726368159204E-4   13167.0 13167.0 
8.4703311E7 0.0
--1645852809trueL057p1HPpJsmA3a NULL-9542.0 6.1374144E7 
1645852809  1   -1645852809 0.0 -9542.0 9542.0  -9542.0 
-6.1374144E7-1645858447.15  -1645852809 -1.554726368159204E-4   -9542.0 
-9542.0 -6.1383686E70.0
--1645852809truePMoJ1NvQoAm5a   NULL539.0   -3466848.0  
1645852809  1   -1645852809 0.0 539.0   -539.0  539.0   
3466848.0   -1645858447.15  -1645852809 -1.554726368159204E-4   539.0   
539.0   3467387.0   0.0
--1645852809trueTt484a  NULL754.0   -4849728.0  1645852809  
1   -1645852809 0.0 754.0   -754.0  754.0   4849728.0   
-1645858447.15  -1645852809 -1.554726368159204E-4   754.0   754.0   
4850482.0   0.0
--1645852809truea   NULL-2944.0 1.8935808E7 1645852809  
1   -1645852809 0.0 -2944.0 2944.0  -2944.0 -1.8935808E7
-1645858447.15  -1645852809 -1.554726368159204E-4   -2944.0 -2944.0 
-1.8938752E70.0
--1645852809truea   NULL-5905.0 3.798096E7  1645852809  
1   -1645852809 0.0 -5905.0 5905.0  -5905.0 -3.798096E7 
-1645858447.15  -1645852809 -1.554726368159204E-4   -5905.0 -5905.0 
-3.7986865E70.0
--1645852809truea   NULL4991.0  -3.2102112E71645852809  
1   -1645852809 0.0 4991.0  -4991.0 4991.0  3.2102112E7 
-1645858447.15  -1645852809 -1.554726368159204E-4   4991.0  4991.0  
3.2107103E7 0.0
--1645852809truebBAKio7bAmQq7vIlsc8H14a NULL1949.0  -1.2535968E7
1645852809  1   -1645852809 0.0 1949.0  -1949.0 1949.0  
1.2535968E7 -1645858447.15  -1645852809 -1.554726368159204E-4   1949.0  
1949.0  1.2537917E7 0.0
--1645852809truedun2EEixI701imr3

hive git commit: HIVE-15051: Test framework integration with findbugs, rat checks etc. (Peter Vary via Zoltan Haindrich, reviewed by Thejas M Nair)

2017-06-16 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 8d2b4e905 -> f6be1a3c0


HIVE-15051: Test framework integration with findbugs, rat checks etc. (Peter 
Vary via Zoltan Haindrich, reviewed by Thejas M Nair)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f6be1a3c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f6be1a3c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f6be1a3c

Branch: refs/heads/master
Commit: f6be1a3c0d1e15829d61b53e3bccdf0787de04d7
Parents: 8d2b4e9
Author: Peter Vary 
Authored: Fri Jun 16 16:12:33 2017 +0200
Committer: Zoltan Haindrich 
Committed: Fri Jun 16 16:15:26 2017 +0200

--
 .gitignore  |   1 +
 dev-support/checkstyle_YETUS-484.sh | 406 ++
 dev-support/findbugs_YETUS-471.sh   | 488 ++
 dev-support/hive-personality.sh | 100 +
 dev-support/maven_YETUS-506.sh  | 687 +++
 dev-support/smart-apply-patch.sh|  18 +
 dev-support/test-patch.sh   |  18 +
 dev-support/yetus-wrapper.sh| 185 +
 pom.xml |  18 +
 9 files changed, 1921 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f6be1a3c/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 47c59da..89169b0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,3 +29,4 @@ hcatalog/webhcat/svr/target
 conf/hive-default.xml.template
 itests/hive-blobstore/src/test/resources/blobstore-conf.xml
 .DS_Store
+patchprocess

http://git-wip-us.apache.org/repos/asf/hive/blob/f6be1a3c/dev-support/checkstyle_YETUS-484.sh
--
diff --git a/dev-support/checkstyle_YETUS-484.sh 
b/dev-support/checkstyle_YETUS-484.sh
new file mode 100644
index 000..e297a13
--- /dev/null
+++ b/dev-support/checkstyle_YETUS-484.sh
@@ -0,0 +1,406 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_test_type checkstyle
+
+CHECKSTYLE_TIMER=0
+CHECKSTYLE_GOAL_DEFAULT="checkstyle"
+CHECKSTYLE_GOAL="${CHECKSTYLE_GOAL_DEFAULT}"
+CHECKSTYLE_OPTIONS_DEFAULT="-Dcheckstyle.consoleOutput=true"
+CHECKSTYLE_OPTIONS="${CHECKSTYLE_OPTIONS_DEFAULT}"
+
+function checkstyle_filefilter
+{
+  local filename=$1
+
+  if [[ ${BUILDTOOL} == maven
+|| ${BUILDTOOL} == ant ]]; then
+if [[ ${filename} =~ \.java$ ]]; then
+  add_test checkstyle
+fi
+  fi
+}
+
+## @description  usage help for checkstyle
+## @audience private
+## @stabilityevolving
+## @replaceable  no
+function checkstyle_usage
+{
+  yetus_add_option "--checkstyle-goal=" "Checkstyle maven plugin goal to 
use, 'check' and 'checkstyle' supported. Defaults to 
'${CHECKSTYLE_GOAL_DEFAULT}'."
+}
+
+## @description  parse checkstyle args
+## @audience private
+## @stabilityevolving
+## @replaceable  no
+## @paramarg
+## @param..
+function checkstyle_parse_args
+{
+  local i
+
+  for i in "$@"; do
+case ${i} in
+--checkstyle-goal=*)
+  CHECKSTYLE_GOAL=${i#*=}
+case ${CHECKSTYLE_GOAL} in
+check)
+CHECKSTYLE_OPTIONS="-Dcheckstyle.consoleOutput=true 
-Dcheckstyle.failOnViolation=false"
+;;
+checkstyle)
+;;
+*)
+yetus_error "Warning: checkstyle goal ${CHECKSTYLE_GOAL} not 
supported. It may have unexpected behavior"
+;;
+esac
+;;
+esac
+  done
+}
+
+## @description  initialize the checkstyle plug-in
+## @audience private
+## @stabilityevolving
+## @replaceable  no
+function checkstyle_initialize
+{
+  if declare -f maven_add_install >/dev/null 2>&1; then
+maven_add_install checkstyle
+  fi
+}
+
+## @description  checkstyle plug-in specific difference calculator
+## @audience private
+## @stabilityevolving
+## @replaceable  no
+## @parambranchlog
+## @parampatchlog
+## @return   differences
+function checkstyle_calcdiffs
+{
+  declare orig=$1
+  

hive git commit: HIVE-16715: Clean up javadoc from errors in modules llap-client, metastore, spark-client (Janos Gub via Zoltan Haindrich)

2017-06-16 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 8dfa8a5ae -> 8d2b4e905


HIVE-16715: Clean up javadoc from errors in modules llap-client, metastore, 
spark-client (Janos Gub via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8d2b4e90
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8d2b4e90
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8d2b4e90

Branch: refs/heads/master
Commit: 8d2b4e905c9d802ac0fc34c5a2c7344bffa5d451
Parents: 8dfa8a5
Author: Janos Gub 
Authored: Fri Jun 16 14:38:47 2017 +0200
Committer: Zoltan Haindrich 
Committed: Fri Jun 16 14:38:47 2017 +0200

--
 .../llap/ext/LlapTaskUmbilicalExternalClient.java |  1 -
 .../hadoop/hive/llap/registry/ServiceInstance.java|  2 +-
 .../apache/hadoop/hive/metastore/DatabaseProduct.java |  2 +-
 .../org/apache/hadoop/hive/metastore/Deadline.java|  4 ++--
 .../hadoop/hive/metastore/HiveMetaStoreClient.java|  8 
 .../hadoop/hive/metastore/IMetaStoreClient.java   | 14 +++---
 .../hadoop/hive/metastore/IMetaStoreSchemaInfo.java   |  2 +-
 .../hadoop/hive/metastore/MetaStoreFilterHook.java|  5 ++---
 .../apache/hadoop/hive/metastore/MetaStoreThread.java |  2 +-
 .../apache/hadoop/hive/metastore/MetaStoreUtils.java  |  5 ++---
 .../hadoop/hive/metastore/PartFilterExprUtil.java |  2 +-
 .../org/apache/hadoop/hive/metastore/RawStore.java|  2 --
 .../hadoop/hive/metastore/events/InsertEvent.java |  2 +-
 .../hadoop/hive/metastore/hbase/HBaseReadWrite.java   |  1 -
 .../hadoop/hive/metastore/hbase/MetadataStore.java|  2 +-
 .../event/filters/DatabaseAndTableFilter.java |  2 +-
 .../hadoop/hive/metastore/parser/ExpressionTree.java  |  4 +---
 .../hadoop/hive/metastore/tools/HiveSchemaHelper.java |  2 +-
 .../apache/hadoop/hive/metastore/txn/TxnStore.java|  2 +-
 .../apache/hadoop/hive/metastore/txn/TxnUtils.java|  2 +-
 .../hive/metastore/model/MStorageDescriptor.java  |  2 +-
 .../org/apache/hive/spark/client/SparkClient.java |  9 +
 .../apache/hive/spark/client/rpc/RpcDispatcher.java   |  7 ---
 23 files changed, 39 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
--
diff --git 
a/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
 
b/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
index c7de417..406bdda 100644
--- 
a/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
+++ 
b/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
@@ -196,7 +196,6 @@ public class LlapTaskUmbilicalExternalClient extends 
AbstractService implements
 
   /**
* Submit the work for actual execution.
-   * @throws InvalidProtocolBufferException 
*/
   public void submitWork(SubmitWorkRequestProto request, String llapHost, int 
llapPort) {
 // Register the pending events to be sent for this spec.

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
--
diff --git 
a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
 
b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
index 081995c..70515c4 100644
--- 
a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
+++ 
b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
@@ -20,7 +20,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 public interface ServiceInstance {
 
   /**
-   * Worker identity is a UUID (unique across restarts), to identify a node 
which died & was brought
+   * Worker identity is a UUID (unique across restarts), to identify a node 
which died &amp; was brought
* back on the same host/port
*/
   public String getWorkerIdentity();

http://git-wip-us.apache.org/repos/asf/hive/blob/8d2b4e90/metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
index 33abbb2..7634852 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
@@ -27,7 +27,7 @@ public enum DatabaseProduct {
 
   /*

hive git commit: HIVE-16618: Clean up javadoc from errors in module hive-common [addendum] (Janos Gub via Zoltan Haindrich)

2017-06-02 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 9f976 -> 0155a34cd


HIVE-16618: Clean up javadoc from errors in module hive-common [addendum] 
(Janos Gub via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0155a34c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0155a34c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0155a34c

Branch: refs/heads/master
Commit: 0155a34cd0413b82b9f82aea2bf8ef9198377be3
Parents: 9f976ff
Author: Janos Gub 
Authored: Fri Jun 2 16:28:58 2017 +0200
Committer: Zoltan Haindrich 
Committed: Fri Jun 2 16:29:05 2017 +0200

--
 .../hive/http/Log4j2ConfiguratorServlet.java  | 18 +++---
 1 file changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0155a34c/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
--
diff --git 
a/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java 
b/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
index 8042f21..6d07fb4 100644
--- a/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
+++ b/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
@@ -38,13 +38,13 @@ import org.slf4j.LoggerFactory;
 
 /**
  * A servlet to configure log4j2.
- * 
+ * 
  * HTTP GET returns all loggers and it's log level in JSON formatted response.
- * 
+ * 
  * HTTP POST is used for configuring the loggers. POST data should be in the 
same format as GET's response.
  * To configure (add/update existing loggers), use HTTP POST with logger names 
and level in the following JSON format.
  *
- * 
+ * 
  * 
  * {
  *  "loggers": [ {
@@ -57,11 +57,12 @@ import org.slf4j.LoggerFactory;
  *"logger" : "org.apache.zookeeper.server.NIOServerCnxn",
  *"level" : "WARN"
  *  }]
- * }
+ * }
  * 
  *
- * 
+ * 
  * Example usage:
+ * 
  * 
  *Returns all loggers with levels in JSON format:
  *
@@ -98,14 +99,17 @@ import org.slf4j.LoggerFactory;
  *  { "logger" : "org.apache.orc", "level" : "INFO" } ] }' 
http://hostame:port/conflog
  *
  * 
- * 
+ * 
+ * 
  * Response Status Codes:
- * 
+ * 
+ * 
  * 200 - OK : If the POST data is valid and if the request succeeds or if 
GET request succeeds.
  * 401 - UNAUTHORIZED : If the user does not have privileges to access 
instrumentation servlets.
  *  Refer 
hadoop.security.instrumentation.requires.admin config for more 
info.
  * 400 - BAD_REQUEST : If the POST data is not a valid JSON.
  * 500 - INTERNAL_SERVER_ERROR : If GET requests throws any IOException 
during JSON output generation.
+ * 
  */
 public class Log4j2ConfiguratorServlet extends HttpServlet {
   private static final long serialVersionUID = 1L;
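
The javadoc being cleaned up here describes a small HTTP API: GET on the conflog endpoint lists loggers and levels as JSON, and a POST with the same JSON shape reconfigures them (subject to the admin check the 401 note refers to). A hypothetical client sketch against that contract, using only the JDK; the base URL is passed in because host, port and servlet mounting depend on the deployment:

  // ConfLogPostDemo.java -- hypothetical client for the /conflog endpoint documented above.
  import java.io.OutputStream;
  import java.net.HttpURLConnection;
  import java.net.URL;
  import java.nio.charset.StandardCharsets;

  public class ConfLogPostDemo {
    public static void main(String[] args) throws Exception {
      // args[0] is the web UI base URL, e.g. http://host:port (deployment specific)
      String body = "{ \"loggers\": [ { \"logger\" : \"org.apache.orc\", \"level\" : \"INFO\" } ] }";
      HttpURLConnection conn = (HttpURLConnection) new URL(args[0] + "/conflog").openConnection();
      conn.setRequestMethod("POST");
      conn.setRequestProperty("Content-Type", "application/json");
      conn.setDoOutput(true);
      try (OutputStream out = conn.getOutputStream()) {
        out.write(body.getBytes(StandardCharsets.UTF_8));
      }
      // per the javadoc: 200 on success, 400 for malformed JSON, 401 without admin privileges
      System.out.println("HTTP " + conn.getResponseCode());
    }
  }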



hive git commit: HIVE-15834: Add unit tests for org.json usage on master (Daniel Voros via Zoltan Haindrich)

2017-05-25 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 811e59951 -> 2fa4dc277


HIVE-15834: Add unit tests for org.json usage on master (Daniel Voros via 
Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2fa4dc27
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2fa4dc27
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2fa4dc27

Branch: refs/heads/master
Commit: 2fa4dc277e0cd28261602c392c2f55d040d22677
Parents: 811e599
Author: Daniel Voros 
Authored: Thu May 25 21:16:44 2017 +0200
Committer: Zoltan Haindrich 
Committed: Thu May 25 21:16:44 2017 +0200

--
 .../hadoop/hive/common/jsonexplain/Op.java  |   4 +-
 .../hadoop/hive/common/jsonexplain/TestOp.java  |  81 +
 .../hive/common/jsonexplain/TestStage.java  | 194 
 .../hive/common/jsonexplain/TestVertex.java | 108 +++
 .../jsonexplain/tez/TestTezJsonParser.java  |  53 
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |  19 +-
 .../apache/hadoop/hive/ql/hooks/ATSHook.java|   4 +-
 .../hadoop/hive/ql/exec/TestExplainTask.java| 293 ++-
 .../hadoop/hive/ql/hooks/TestATSHook.java   |  59 
 9 files changed, 796 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2fa4dc27/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java 
b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java
index 39c44f1..e9eb5a7 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java
@@ -29,6 +29,7 @@ import java.util.Set;
 
 import org.apache.hadoop.hive.common.jsonexplain.Vertex.VertexType;
 import org.json.JSONArray;
+import com.google.common.annotations.VisibleForTesting;
 import org.json.JSONException;
 import org.json.JSONObject;
 
@@ -85,7 +86,8 @@ public final class Op {
 }
   }
 
-  private void inlineJoinOp() throws Exception {
+  @VisibleForTesting
+  void inlineJoinOp() throws Exception {
 // inline map join operator
 if (this.type == OpType.MAPJOIN) {
   // get the map for posToVertex

http://git-wip-us.apache.org/repos/asf/hive/blob/2fa4dc27/common/src/test/org/apache/hadoop/hive/common/jsonexplain/TestOp.java
--
diff --git 
a/common/src/test/org/apache/hadoop/hive/common/jsonexplain/TestOp.java 
b/common/src/test/org/apache/hadoop/hive/common/jsonexplain/TestOp.java
new file mode 100644
index 000..eb5dca4
--- /dev/null
+++ b/common/src/test/org/apache/hadoop/hive/common/jsonexplain/TestOp.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common.jsonexplain;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.hadoop.hive.common.jsonexplain.tez.TezJsonParser;
+import org.json.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestOp {
+
+  private ObjectMapper objectMapper = new ObjectMapper();
+  private TezJsonParser tezJsonParser;
+
+  @Before
+  public void setUp() throws Exception {
+this.tezJsonParser = new TezJsonParser();
+  }
+
+
+  @Test
+  public void testInlineJoinOpJsonHandling() throws Exception {
+String jsonString = "{" +
+"\"input vertices:\":{\"a\":\"AVERTEX\"}," + "\"condition map:\": 
[" +
+"{\"c1\": \"{\\\"type\\\": \\\"type\\\", \\\"left\\\": 
\\\"left\\\", " +
+"\\\"right\\\": \\\"right\\\"}\"}]," +
+"\"keys:\":{\"left\":\"AKEY\", \"right\":\"BKEY\"}}";
+JSONObject mapJoin = new JSONObject(jsonString);
+
+Vertex vertexB = new Vertex("vertex-b", null, null, tezJsonParser);
+Op dummyOp = new 

[6/6] hive git commit: HIVE-15483: Database and table name is case sensitive when used in show grant (Niklaus Xiao via Zoltan Haindrich)

2017-05-15 Thread kgyrtkirk
HIVE-15483: Database and table name is case sensitive when used in show grant 
(Niklaus Xiao via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/360a91e6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/360a91e6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/360a91e6

Branch: refs/heads/master
Commit: 360a91e64590940641aff9304eb0275cbfd82e39
Parents: 91948ec
Author: Niklaus Xiao 
Authored: Tue May 16 08:39:16 2017 +0200
Committer: Zoltan Haindrich 
Committed: Tue May 16 08:39:16 2017 +0200

--
 .../src/java/org/apache/hadoop/hive/metastore/ObjectStore.java   | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/360a91e6/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index ee48617..b28983f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -5953,6 +5953,8 @@ public class ObjectStore implements RawStore, 
Configurable {
   public List listTableGrantsAll(String dbName, String 
tableName) {
 boolean success = false;
 Query query = null;
+dbName = HiveStringUtils.normalizeIdentifier(dbName);
+tableName = HiveStringUtils.normalizeIdentifier(tableName);
 try {
   openTransaction();
   LOG.debug("Executing listTableGrantsAll");
@@ -6153,6 +6155,8 @@ public class ObjectStore implements RawStore, 
Configurable {
   String columnName) {
 boolean success = false;
 Query query = null;
+dbName = HiveStringUtils.normalizeIdentifier(dbName);
+tableName = HiveStringUtils.normalizeIdentifier(tableName);
 try {
   openTransaction();
   LOG.debug("Executing listPrincipalTableColumnGrantsAll");
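
The added normalizeIdentifier() calls make the grant lookups case-insensitive by canonicalizing the database and table names before they reach the metastore query. A self-contained sketch of that idea; the local helper below is a hypothetical stand-in for HiveStringUtils.normalizeIdentifier and simply assumes normalization means trimming and lower-casing:

public class IdentifierNormalization {
  // Hypothetical stand-in for HiveStringUtils.normalizeIdentifier; assumed to trim and lower-case.
  static String normalizeIdentifier(String identifier) {
    return identifier == null ? null : identifier.trim().toLowerCase();
  }

  public static void main(String[] args) {
    // Without normalization, "Default"/"Src" would not match grants stored as "default"/"src".
    String dbName = normalizeIdentifier("Default");
    String tableName = normalizeIdentifier(" Src ");
    System.out.println(dbName + "." + tableName);   // prints default.src
  }
}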



[1/6] hive git commit: HIVE-15726: Reenable indentation checks to checkstyle (Peter Vary via Zoltan Haindrich)

2017-05-15 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 0ce98b3a7 -> 360a91e64


HIVE-15726: Reenable indentation checks to checkstyle (Peter Vary via Zoltan 
Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1b8ba022
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1b8ba022
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1b8ba022

Branch: refs/heads/master
Commit: 1b8ba022c26ef929f35dc12c5c70e1683fa2e373
Parents: 0ce98b3
Author: Peter Vary 
Authored: Tue May 16 08:13:28 2017 +0200
Committer: Zoltan Haindrich 
Committed: Tue May 16 08:13:28 2017 +0200

--
 checkstyle/checkstyle.xml | 8 +---
 1 file changed, 1 insertion(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1b8ba022/checkstyle/checkstyle.xml
--
diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml
index bd6b2f8..82b0a28 100644
--- a/checkstyle/checkstyle.xml
+++ b/checkstyle/checkstyle.xml
@@ -208,17 +208,11 @@
 
 
 
-
-
-
-
-
-
 
 
 



[4/6] hive git commit: HIVE-16619: Clean up javadoc from errors in module hive-serde (Janos Gub via Zoltan Haindrich)

2017-05-15 Thread kgyrtkirk
HIVE-16619: Clean up javadoc from errors in module hive-serde (Janos Gub via 
Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/23e703f9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/23e703f9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/23e703f9

Branch: refs/heads/master
Commit: 23e703f96935ecd6c7daf6316329f305ccac6615
Parents: 202c513
Author: Janos Gub 
Authored: Tue May 16 08:30:58 2017 +0200
Committer: Zoltan Haindrich 
Committed: Tue May 16 08:30:58 2017 +0200

--
 .../hadoop/hive/serde2/AbstractSerDe.java   |  2 +-
 .../hive/serde2/ColumnProjectionUtils.java  |  8 
 .../apache/hadoop/hive/serde2/SerDeUtils.java   |  2 +-
 .../serde2/avro/AvroLazyObjectInspector.java|  2 +-
 .../hive/serde2/avro/AvroSchemaRetriever.java   |  2 +-
 .../hive/serde2/columnar/ColumnarSerDe.java |  4 ++--
 .../serde2/columnar/ColumnarStructBase.java |  2 +-
 .../dynamic_type/DynamicSerDeStructBase.java|  2 +-
 .../serde2/dynamic_type/ParseException.java |  2 +-
 .../hive/serde2/fast/DeserializeRead.java   |  6 +++---
 .../hive/serde2/io/TimestampWritable.java   |  6 +++---
 .../hadoop/hive/serde2/lazy/LazyDate.java   |  2 +-
 .../hadoop/hive/serde2/lazy/LazyFactory.java| 21 
 .../hive/serde2/lazy/LazyHiveDecimal.java   |  2 +-
 .../hive/serde2/lazy/LazySimpleSerDe.java   | 12 +--
 .../hive/serde2/lazybinary/LazyBinaryArray.java |  2 +-
 .../hive/serde2/lazybinary/LazyBinaryMap.java   |  2 +-
 .../hive/serde2/lazybinary/LazyBinarySerDe.java |  6 +++---
 .../serde2/lazybinary/LazyBinaryString.java |  2 +-
 .../serde2/lazybinary/LazyBinaryStruct.java |  2 +-
 .../hive/serde2/lazybinary/LazyBinaryUnion.java |  2 +-
 .../hive/serde2/lazybinary/LazyBinaryUtils.java |  2 +-
 .../serde2/objectinspector/ObjectInspector.java |  2 +-
 .../objectinspector/ObjectInspectorUtils.java   |  4 ++--
 .../PrimitiveObjectInspectorFactory.java|  6 +++---
 .../hive/serde2/typeinfo/TypeInfoUtils.java |  4 ++--
 26 files changed, 52 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/23e703f9/serde/src/java/org/apache/hadoop/hive/serde2/AbstractSerDe.java
--
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/AbstractSerDe.java 
b/serde/src/java/org/apache/hadoop/hive/serde2/AbstractSerDe.java
index 049b35d..a2a85b3 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/AbstractSerDe.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/AbstractSerDe.java
@@ -118,7 +118,7 @@ public abstract class AbstractSerDe implements 
Deserializer, Serializer {
   }
 
   /**
-   * @rturn Whether the SerDe that can store schema both inside and outside of 
metastore
+   * @return Whether the SerDe that can store schema both inside and outside 
of metastore
*does, in fact, store it inside metastore, based on table 
parameters.
*/
   public boolean shouldStoreFieldsInMetastore(Map tableParams) 
{

http://git-wip-us.apache.org/repos/asf/hive/blob/23e703f9/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java
--
diff --git 
a/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java 
b/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java
index 9844166..2009645 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java
@@ -44,7 +44,7 @@ public final class ColumnProjectionUtils {
   /**
* the nested column path is the string from the root to the leaf
* e.g.
-   * c:struct
+   * c:struct
* the column a's path is c.a and b's path is c.b
*/
   public static final String READ_NESTED_COLUMN_PATH_CONF_STR =
@@ -57,7 +57,7 @@ public final class ColumnProjectionUtils {
   private static final Joiner CSV_JOINER = Joiner.on(",").skipNulls();
 
   /**
-   * @deprecated for backwards compatibility with <= 0.12, use 
setReadAllColumns
+   * @deprecated for backwards compatibility with <= 0.12, use 
setReadAllColumns
*/
   @Deprecated
   public static void setFullyReadColumns(Configuration conf) {
@@ -65,7 +65,7 @@ public final class ColumnProjectionUtils {
   }
 
   /**
-   * @deprecated for backwards compatibility with <= 0.12, use 
setReadAllColumns
+   * @deprecated for backwards compatibility with <= 0.12, use 
setReadAllColumns
* and appendReadColumns
*/
   @Deprecated
@@ -76,7 +76,7 @@ public final class ColumnProjectionUtils {
   }
 
   /**
-   * @deprecated for backwards compatibility with <= 0.12, use 
ap

[2/6] hive git commit: HIVE-16617: Clean up javadoc from errors in module hive-shims (Janos Gub via Zoltan Haindrich)

2017-05-15 Thread kgyrtkirk
HIVE-16617: Clean up javadoc from errors in module hive-shims (Janos Gub via 
Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7827316f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7827316f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7827316f

Branch: refs/heads/master
Commit: 7827316f0c4cb9e9fe63506687b33bf4b2c4e70a
Parents: 1b8ba02
Author: Janos Gub 
Authored: Tue May 16 08:25:37 2017 +0200
Committer: Zoltan Haindrich 
Committed: Tue May 16 08:25:37 2017 +0200

--
 .../src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java | 5 ++---
 .../org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge.java   | 4 ++--
 .../hive/thrift/TokenStoreDelegationTokenSecretManager.java | 2 +-
 3 files changed, 5 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/7827316f/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
--
diff --git 
a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java 
b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
index 9c6901d..c280d49 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
@@ -168,7 +168,6 @@ public interface HadoopShims {
* All updates to jobtracker/resource manager rpc address
* in the configuration should be done through this shim
* @param conf
-   * @return
*/
   public void setJobLauncherRpcAddress(Configuration conf, String val);
 
@@ -252,12 +251,12 @@ public interface HadoopShims {
 
   /**
* For the block locations returned by getLocations() convert them into a 
Treemap
-   *  by iterating over the list of blockLocation.
+   *  by iterating over the list of blockLocation.
* Using TreeMap from offset to blockLocation, makes it O(logn) to get a 
particular
* block based upon offset.
* @param fs the file system
* @param status the file information
-   * @return TreeMap
+   * @return TreeMap
* @throws IOException
*/
   TreeMap getLocationsWithOffset(FileSystem fs,
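
The javadoc above explains the motivation for getLocationsWithOffset(): keeping block locations in a TreeMap keyed by start offset makes finding the block that covers a given offset an O(log n) floorEntry lookup. A minimal sketch of that lookup pattern, using plain longs in place of Hadoop's BlockLocation:

import java.util.Map;
import java.util.TreeMap;

public class OffsetLookup {
  public static void main(String[] args) {
    // Map from block start offset to block length; stands in for offset -> BlockLocation.
    TreeMap<Long, Long> blocksByOffset = new TreeMap<>();
    blocksByOffset.put(0L, 128L);
    blocksByOffset.put(128L, 128L);
    blocksByOffset.put(256L, 64L);

    long offset = 200L;
    // floorEntry finds the greatest block start <= offset in O(log n).
    Map.Entry<Long, Long> block = blocksByOffset.floorEntry(offset);
    System.out.println("offset " + offset + " falls in block starting at " + block.getKey());
  }
}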

http://git-wip-us.apache.org/repos/asf/hive/blob/7827316f/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge.java
--
diff --git 
a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge.java
 
b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge.java
index d420d09..fd86fed 100644
--- 
a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge.java
+++ 
b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HadoopThriftAuthBridge.java
@@ -169,9 +169,9 @@ public abstract class HadoopThriftAuthBridge {
 /**
  * Create a client-side SASL transport that wraps an underlying transport.
  *
- * @param method The authentication method to use. Currently only KERBEROS 
is
+ * @param methodStr The authentication method to use. Currently only 
KERBEROS is
  *   supported.
- * @param serverPrincipal The Kerberos principal of the target server.
+ * @param principalConfig The Kerberos principal of the target server.
  * @param underlyingTransport The underlying transport mechanism, usually 
a TSocket.
  * @param saslProps the sasl properties to create the client with
  */

http://git-wip-us.apache.org/repos/asf/hive/blob/7827316f/shims/common/src/main/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java
--
diff --git 
a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java
 
b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java
index 4d910d8..4719b85 100644
--- 
a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java
+++ 
b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java
@@ -184,7 +184,7 @@ public class TokenStoreDelegationTokenSecretManager extends 
DelegationTokenSecre
 
   /**
* Synchronize master key updates / sequence generation for multiple nodes.
-   * NOTE: {@Link AbstractDelegationTokenSecretManager} keeps currentKey 
private, so we need
+   * NOTE: {@link AbstractDelegationTokenSecretManager} keeps currentKey 
private, so we need
* to utilize this "hook" to manipulate the key through the object reference.
* This .20S workaround should cease to exist when H

[5/6] hive git commit: HIVE-16413: Create table as select does not check ownership of the location (Niklaus Xiao via Zoltan Haindrich)

2017-05-15 Thread kgyrtkirk
HIVE-16413: Create table as select does not check ownership of the location 
(Niklaus Xiao via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/91948ec0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/91948ec0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/91948ec0

Branch: refs/heads/master
Commit: 91948ec00b1b642b401bfb529f211eb59b8b16ad
Parents: 23e703f
Author: Niklaus Xiao 
Authored: Tue May 16 08:36:05 2017 +0200
Committer: Zoltan Haindrich 
Committed: Tue May 16 08:36:05 2017 +0200

--
 .../security/authorization/plugin/sqlstd/Operation2Privilege.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/91948ec0/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
index 18b0e1c..9688f8c 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
@@ -302,6 +302,7 @@ public class Operation2Privilege {
 (SEL_NOGRANT_AR, null));
 op2Priv.put(HiveOperationType.CREATETABLE_AS_SELECT, 
PrivRequirement.newPrivRequirementList(
 new PrivRequirement(SEL_NOGRANT_AR, IOType.INPUT),
+new PrivRequirement(OWNER_INS_SEL_DEL_NOGRANT_AR, 
HivePrivilegeObjectType.DFS_URI),
 new PrivRequirement(OWNER_PRIV_AR, HivePrivilegeObjectType.DATABASE)));
 
 // QUERY,LOAD op can contain an insert & overwrite,



[3/6] hive git commit: HIVE-16618: Clean up javadoc from errors in module hive-common (Janos Gub via Zoltan Haindrich)

2017-05-15 Thread kgyrtkirk
HIVE-16618: Clean up javadoc from errors in module hive-common (Janos Gub via 
Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/202c5137
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/202c5137
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/202c5137

Branch: refs/heads/master
Commit: 202c5137f2b73ead25d6907d1f74a8857690440e
Parents: 7827316
Author: Janos Gub 
Authored: Tue May 16 08:27:39 2017 +0200
Committer: Zoltan Haindrich 
Committed: Tue May 16 08:27:39 2017 +0200

--
 .../org/apache/hadoop/hive/common/CompressionUtils.java |  8 
 .../java/org/apache/hadoop/hive/common/JavaUtils.java   |  4 ++--
 .../org/apache/hadoop/hive/common/StatsSetupConst.java  |  6 +++---
 .../hadoop/hive/common/ValidCompactorTxnList.java   |  4 ++--
 .../hive/common/classification/RetrySemantics.java  |  1 -
 .../apache/hadoop/hive/common/cli/CommonCliOptions.java |  4 ++--
 .../hive/common/metrics/common/MetricsVariable.java |  2 +-
 .../org/apache/hadoop/hive/common/type/Decimal128.java  | 12 +---
 .../java/org/apache/hadoop/hive/conf/HiveConfUtil.java  |  3 ++-
 .../org/apache/hive/common/util/HiveStringUtils.java|  4 ++--
 .../apache/hive/common/util/ShutdownHookManager.java|  2 +-
 common/src/java/org/apache/hive/http/HttpServer.java|  6 --
 .../src/java/org/apache/hive/http/JMXJsonServlet.java   |  6 +++---
 13 files changed, 31 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/202c5137/common/src/java/org/apache/hadoop/hive/common/CompressionUtils.java
--
diff --git 
a/common/src/java/org/apache/hadoop/hive/common/CompressionUtils.java 
b/common/src/java/org/apache/hadoop/hive/common/CompressionUtils.java
index d26207d..c4f2297 100644
--- a/common/src/java/org/apache/hadoop/hive/common/CompressionUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/CompressionUtils.java
@@ -112,8 +112,8 @@ public class CompressionUtils {
* The output file is created in the output folder, having the same name as 
the input file, minus
* the '.tar' extension.
*
-   * @param inputFile the input .tar file
-   * @param outputDir the output directory file.
+   * @param inputFileName the input .tar file
+   * @param outputDirName the output directory file.
* @throws IOException
* @throws FileNotFoundException
*
@@ -131,8 +131,8 @@ public class CompressionUtils {
* The output file is created in the output folder, having the same name as 
the input file, minus
* the '.tar' extension.
*
-   * @param inputFile the input .tar file
-   * @param outputDir the output directory file.
+   * @param inputFileName the input .tar file
+   * @param outputDirName the output directory file.
* @throws IOException
* @throws FileNotFoundException
*

http://git-wip-us.apache.org/repos/asf/hive/blob/202c5137/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java 
b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
index 3916fe3..b224d26 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
@@ -138,14 +138,14 @@ public final class JavaUtils {
 
   /**
* Utility method for ACID to normalize logging info.  Matches
-   * {@link org.apache.hadoop.hive.metastore.api.LockRequest#toString()}
+   * org.apache.hadoop.hive.metastore.api.LockRequest#toString
*/
   public static String lockIdToString(long extLockId) {
 return "lockid:" + extLockId;
   }
   /**
* Utility method for ACID to normalize logging info.  Matches
-   * {@link org.apache.hadoop.hive.metastore.api.LockResponse#toString()}
+   * org.apache.hadoop.hive.metastore.api.LockResponse#toString
*/
   public static String txnIdToString(long txnId) {
 return "txnid:" + txnId;

http://git-wip-us.apache.org/repos/asf/hive/blob/202c5137/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java 
b/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index a9e17c2..2387407 100644
--- a/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ b/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@ -110,18 +110,18 @@ public class StatsSetupConst {
 
   public static final String STATS_FILE_PREFIX = "tmpstats-";
   /**
-   * @return List of all supported statistics
+   * List of all sup

hive git commit: Revert "HIVE-16501 : Add rej/orig to .gitignore"

2017-05-15 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 0efb93681 -> 413245ed9


Revert "HIVE-16501 : Add rej/orig to .gitignore"

Note: removal of *.orig files has been kept
This reverts commit c911f42035d9e7b91191e2683f85d8fd2a35eb27.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/413245ed
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/413245ed
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/413245ed

Branch: refs/heads/master
Commit: 413245ed90f09ab446711ac37f32591e3ac68462
Parents: 0efb936
Author: Zoltan Haindrich 
Authored: Mon May 15 20:31:15 2017 +0200
Committer: Zoltan Haindrich 
Committed: Mon May 15 20:33:15 2017 +0200

--
 .gitignore | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/413245ed/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 8578a64..47c59da 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,5 +29,3 @@ hcatalog/webhcat/svr/target
 conf/hive-default.xml.template
 itests/hive-blobstore/src/test/resources/blobstore-conf.xml
 .DS_Store
-*.rej
-*.orig



hive git commit: HIVE-15224: replace org.json usage in branch-1 with as minor changes as possible (Daniel Voros via Zoltan Haindrich)

2017-05-07 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/branch-1 cb7bb81e9 -> 8528ba8ff


HIVE-15224: replace org.json usage in branch-1 with as minor changes as 
possible (Daniel Voros via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8528ba8f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8528ba8f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8528ba8f

Branch: refs/heads/branch-1
Commit: 8528ba8ff15b424865478f1da6575250f35dbd7e
Parents: cb7bb81
Author: Daniel Voros 
Authored: Mon May 8 08:20:43 2017 +0200
Committer: Zoltan Haindrich 
Committed: Mon May 8 08:32:39 2017 +0200

--
 LICENSE |  28 +
 common/pom.xml  |   9 +-
 .../hive/common/jsonexplain/JsonParser.java |   2 +-
 .../hive/common/jsonexplain/JsonUtils.java  |  60 +++
 .../hadoop/hive/common/jsonexplain/tez/Op.java  |  20 ++--
 .../hive/common/jsonexplain/tez/Stage.java  |  64 ++--
 .../common/jsonexplain/tez/TezJsonParser.java   |  30 +++---
 .../hive/common/jsonexplain/tez/Vertex.java |  49 -
 .../hive/common/jsonexplain/TestJsonUtils.java  | 102 +++
 .../hive/common/jsonexplain/tez/TestOp.java |   8 +-
 .../hive/common/jsonexplain/tez/TestStage.java  |  23 +++--
 .../jsonexplain/tez/TestTezJsonParser.java  |   9 +-
 .../hive/common/jsonexplain/tez/TestVertex.java |  18 ++--
 pom.xml |  12 +--
 ql/pom.xml  |  11 +-
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |  28 ++---
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java |   6 +-
 .../apache/hadoop/hive/ql/hooks/ATSHook.java|   2 +-
 .../apache/hadoop/hive/ql/parse/EximUtil.java   |  29 +++---
 .../hadoop/hive/ql/exec/TestExplainTask.java|  18 ++--
 .../hadoop/hive/ql/hooks/TestATSHook.java   |   5 +-
 .../hadoop/hive/ql/parse/TestEximUtil.java  |   7 +-
 22 files changed, 343 insertions(+), 197 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8528ba8f/LICENSE
--
diff --git a/LICENSE b/LICENSE
index db3777d..c607f21 100644
--- a/LICENSE
+++ b/LICENSE
@@ -305,32 +305,6 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 
IF ADVISED OF
 THE POSSIBILITY OF SUCH DAMAGE.
 
 
-For the org.json library:
-
-Copyright (c) 2002 JSON.org
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-The Software shall be used for Good, not Evil.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
 For the JLine library:
 
 Copyright (c) 2002-2006, Marc Prud'hommeaux 
@@ -494,4 +468,4 @@ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
PROFITS; OR BUSINESS
 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
+POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/hive/blob/8528ba8f/common/pom.xml
--
diff --git a/common/pom.xml b/common/pom.xml
index 8d4b1ea..1a70875 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -88,17 +88,16 @@
   test
 
 
+  com.googlecode.json-simple
+  json-simple
+
+
   junit
   junit
   ${junit.version}
   test
 
 
-  org.json
-  json
-  ${json.version}
-
-
   io.dropwizard.metrics
   metrics-core
   ${dropwizard.version}
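
The pom change above drops the org.json artifact and pulls in json-simple instead. Purely as a hedged illustration of the json-simple parsing style this implies (this is not code from the commit, and the sample payload below is made up), parsing a small explain-like JSON document looks roughly like this:

import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;

public class JsonSimpleSketch {
  public static void main(String[] args) throws Exception {
    // json-simple parses into java.util collections; JSONObject is essentially a Map.
    JSONParser parser = new JSONParser();
    JSONObject obj = (JSONObject) parser.parse(
        "{\"STAGE DEPENDENCIES\":{\"Stage-1\":{\"ROOT STAGE\":\"TRUE\"}}}");
    JSONObject stages = (JSONObject) obj.get("STAGE DEPENDENCIES");
    System.out.println(((JSONObject) stages.get("Stage-1")).get("ROOT STAGE"));  // TRUE
  }
}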

http://git-wip-us.apache.org/repos/asf/hive/blob/8528ba8f/common/src/java/org/apache/hadoop/hive/common/jsonexplain/JsonParser.java
---

[1/2] hive git commit: HIVE-13583: E061-14: Search Conditions (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-05-07 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master ec8c390ee -> 54dbca69c


HIVE-13583: E061-14: Search Conditions (Zoltan Haindrich, reviewed by Ashutosh 
Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/54dbca69
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/54dbca69
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/54dbca69

Branch: refs/heads/master
Commit: 54dbca69c9ea630b9cccd5550bdb455b9bbc240c
Parents: 0f8840a
Author: Zoltan Haindrich 
Authored: Mon May 8 07:27:01 2017 +0200
Committer: Zoltan Haindrich 
Committed: Mon May 8 07:44:07 2017 +0200

--
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |   4 +
 .../translator/SqlFunctionConverter.java|   4 +-
 .../apache/hadoop/hive/ql/parse/HiveParser.g|   4 -
 .../hadoop/hive/ql/parse/IdentifiersParser.g|  19 ++-
 .../hadoop/hive/ql/parse/SubQueryUtils.java |   2 +-
 .../hive/ql/parse/TypeCheckProcFactory.java |   9 --
 .../hive/ql/udf/generic/GenericUDFOPFalse.java  |  65 +
 .../ql/udf/generic/GenericUDFOPNotFalse.java|  65 +
 .../ql/udf/generic/GenericUDFOPNotTrue.java |  65 +
 .../hive/ql/udf/generic/GenericUDFOPTrue.java   |  65 +
 .../apache/hadoop/hive/ql/parse/TestIUD.java|   8 +-
 .../hive/ql/parse/TestMergeStatement.java   |   4 +-
 ql/src/test/queries/clientpositive/udf_isops.q  |  34 +
 .../results/clientpositive/show_functions.q.out |   8 +
 .../test/results/clientpositive/udf_isops.q.out | 146 +++
 15 files changed, 472 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 1b556ac..bf18a8d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -359,6 +359,10 @@ public final class FunctionRegistry {
 
 system.registerGenericUDF("isnull", GenericUDFOPNull.class);
 system.registerGenericUDF("isnotnull", GenericUDFOPNotNull.class);
+system.registerGenericUDF("istrue", GenericUDFOPTrue.class);
+system.registerGenericUDF("isnottrue", GenericUDFOPNotTrue.class);
+system.registerGenericUDF("isfalse", GenericUDFOPFalse.class);
+system.registerGenericUDF("isnotfalse", GenericUDFOPNotFalse.class);
 
 system.registerGenericUDF("if", GenericUDFIf.class);
 system.registerGenericUDF("in", GenericUDFIn.class);

http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
index 10f5eb3..c6b34d4 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
@@ -350,8 +350,8 @@ public class SqlFunctionConverter {
   registerFunction("in", HiveIn.INSTANCE, hToken(HiveParser.Identifier, 
"in"));
   registerFunction("between", HiveBetween.INSTANCE, 
hToken(HiveParser.Identifier, "between"));
   registerFunction("struct", SqlStdOperatorTable.ROW, 
hToken(HiveParser.Identifier, "struct"));
-  registerFunction("isnotnull", SqlStdOperatorTable.IS_NOT_NULL, 
hToken(HiveParser.TOK_ISNOTNULL, "TOK_ISNOTNULL"));
-  registerFunction("isnull", SqlStdOperatorTable.IS_NULL, 
hToken(HiveParser.TOK_ISNULL, "TOK_ISNULL"));
+  registerFunction("isnotnull", SqlStdOperatorTable.IS_NOT_NULL, 
hToken(HiveParser.Identifier, "isnotnull"));
+  registerFunction("isnull", SqlStdOperatorTable.IS_NULL, 
hToken(HiveParser.Identifier, "isnull"));
   registerFunction("is not distinct from", 
SqlStdOperatorTable.IS_NOT_DISTINCT_FROM, hToken(HiveParser.EQUAL_NS, "<=>"));
   registerFunction("when", SqlStdOperatorTable.CASE, 
hToken(HiveParser.Identifier, "when"));
   registerDuplicateFunction("case", SqlStdOperatorTable.CASE, 
hToken(HiveParser.Identifier, "when"));

http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index ca639d

[2/2] hive git commit: HIVE-16562: Issues with nullif / fetch task (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-05-07 Thread kgyrtkirk
HIVE-16562: Issues with nullif / fetch task (Zoltan Haindrich, reviewed by 
Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0f8840a3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0f8840a3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0f8840a3

Branch: refs/heads/master
Commit: 0f8840a31a73f7b3278735ea4fb9cd4b0f3ae8d3
Parents: ec8c390
Author: Zoltan Haindrich 
Authored: Mon May 8 07:21:40 2017 +0200
Committer: Zoltan Haindrich 
Committed: Mon May 8 07:44:07 2017 +0200

--
 .../hive/ql/udf/generic/GenericUDFNullif.java   | 10 --
 .../ql/udf/generic/TestGenericUDFNullif.java| 20 +++
 ql/src/test/queries/clientpositive/udf_nullif.q | 11 ++
 .../results/clientpositive/udf_nullif.q.out | 37 
 4 files changed, 75 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0f8840a3/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFNullif.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFNullif.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFNullif.java
index 452c84e..5020ef8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFNullif.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFNullif.java
@@ -86,16 +86,20 @@ public class GenericUDFNullif extends GenericUDF {
   public Object evaluate(DeferredObject[] arguments) throws HiveException {
 Object arg0 = arguments[0].get();
 Object arg1 = arguments[1].get();
+Object value0 = null;
+if (arg0 != null) {
+  value0 = returnOIResolver.convertIfNecessary(arg0, argumentOIs[0], 
false);
+}
 if (arg0 == null || arg1 == null) {
-  return arg0;
+  return value0;
 }
 PrimitiveObjectInspector compareOI = (PrimitiveObjectInspector) 
returnOIResolver.get();
 if (PrimitiveObjectInspectorUtils.comparePrimitiveObjects(
-arg0, compareOI,
+value0, compareOI,
 returnOIResolver.convertIfNecessary(arg1, argumentOIs[1], false), 
compareOI)) {
   return null;
 }
-return arg0;
+return value0;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/0f8840a3/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNullif.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNullif.java 
b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNullif.java
index a66e63e..3e6efd4 100644
--- 
a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNullif.java
+++ 
b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNullif.java
@@ -24,6 +24,8 @@ import 
org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
+import org.apache.hadoop.hive.serde2.lazy.LazyInteger;
+import 
org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyPrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
@@ -129,4 +131,22 @@ public class TestGenericUDFNullif {
 Assert.assertEquals(TypeInfoFactory.dateTypeInfo, oi.getTypeInfo());
 Assert.assertEquals(null, udf.evaluate(args));
   }
+
+  @Test
+  public void testLazy() throws HiveException {
+GenericUDFNullif udf = new GenericUDFNullif();
+
+ObjectInspector[] inputOIs = { 
LazyPrimitiveObjectInspectorFactory.LAZY_INT_OBJECT_INSPECTOR,
+LazyPrimitiveObjectInspectorFactory.LAZY_INT_OBJECT_INSPECTOR };
+LazyInteger a1 = new 
LazyInteger(LazyPrimitiveObjectInspectorFactory.LAZY_INT_OBJECT_INSPECTOR);
+LazyInteger a2 = new 
LazyInteger(LazyPrimitiveObjectInspectorFactory.LAZY_INT_OBJECT_INSPECTOR);
+a1.getWritableObject().set(1);
+a2.getWritableObject().set(1);
+
+DeferredObject[] args = { new DeferredJavaObject(a1), new 
DeferredJavaObject(a2) };
+
+PrimitiveObjectInspector oi = (PrimitiveObjectInspector) 
udf.initialize(inputOIs);
+Assert.assertEquals(TypeInfoFactory.intTypeInfo, oi.getTypeInfo());
+Assert.assertEquals(null, udf.evaluate(args));
+  }
 }
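
The evaluate() change above converts the first argument through the return ObjectInspector before comparing, so lazily deserialized values (as in the new testLazy case) behave the same as eagerly materialized ones. Stripped of ObjectInspectors, the NULLIF contract the fix preserves can be sketched in plain Java as:

public class NullifSketch {
  // NULLIF(a, b): null when both sides are non-null and equal, otherwise the (converted) first argument.
  static <T> T nullif(T a, T b) {
    if (a != null && b != null && a.equals(b)) {
      return null;
    }
    return a;
  }

  public static void main(String[] args) {
    System.out.println(nullif(1, 1));    // null
    System.out.println(nullif(1, 2));    // 1
    System.out.println(nullif(null, 2)); // null
  }
}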

http://git-wip-us.apache.org/repos/asf/hive/blob/0f8840a3/ql/src/test/queries/clientpositive/udf_nullif.q
--
diff --git a/ql/src/test/queries/clientpositive/udf_nullif.

hive git commit: HIVE-16449: BeeLineDriver should handle query result sorting (Peter Vary via Zoltan Haindrich)

2017-05-05 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master f8f9155da -> 44804d82f


HIVE-16449: BeeLineDriver should handle query result sorting (Peter Vary via 
Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/44804d82
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/44804d82
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/44804d82

Branch: refs/heads/master
Commit: 44804d82f5c1226b0247680954fdd22fc3b200bd
Parents: f8f9155
Author: Peter Vary 
Authored: Fri May 5 13:01:31 2017 +0200
Committer: Zoltan Haindrich 
Committed: Fri May 5 13:02:01 2017 +0200

--
 .../java/org/apache/hive/beeline/Commands.java  | 12 +++
 .../org/apache/hive/beeline/OutputFile.java | 74 +--
 .../hive/cli/control/CoreBeeLineDriver.java |  4 +-
 .../hive/beeline/ConvertedOutputFile.java   | 94 
 .../java/org/apache/hive/beeline/QFile.java | 17 
 .../apache/hive/beeline/QFileBeeLineClient.java | 20 +++--
 .../clientpositive/beeline/smb_mapjoin_1.q.out  |  8 +-
 .../clientpositive/beeline/smb_mapjoin_2.q.out  | 16 ++--
 .../clientpositive/beeline/smb_mapjoin_3.q.out  | 28 +++---
 9 files changed, 232 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/44804d82/beeline/src/java/org/apache/hive/beeline/Commands.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java 
b/beeline/src/java/org/apache/hive/beeline/Commands.java
index 08d53ca..407e018 100644
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -1003,6 +1003,15 @@ public class Commands {
 beeLine.showWarnings();
 
 if (hasResults) {
+  OutputFile outputFile = beeLine.getRecordOutputFile();
+  if (beeLine.isTestMode() && outputFile != null && 
outputFile.isActiveConverter()) {
+outputFile.fetchStarted();
+if (!sql.trim().toLowerCase().startsWith("explain")) {
+  outputFile.foundQuery(true);
+} else {
+  outputFile.foundQuery(false);
+}
+  }
   do {
 ResultSet rs = stmnt.getResultSet();
 try {
@@ -1020,6 +1029,9 @@ public class Commands {
   rs.close();
 }
   } while (BeeLine.getMoreResults(stmnt));
+  if (beeLine.isTestMode() && outputFile != null && 
outputFile.isActiveConverter()) {
+outputFile.fetchFinished();
+  }
 } else {
   int count = stmnt.getUpdateCount();
   long end = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/hive/blob/44804d82/beeline/src/java/org/apache/hive/beeline/OutputFile.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/OutputFile.java 
b/beeline/src/java/org/apache/hive/beeline/OutputFile.java
index 1014af3..3d6c335 100644
--- a/beeline/src/java/org/apache/hive/beeline/OutputFile.java
+++ b/beeline/src/java/org/apache/hive/beeline/OutputFile.java
@@ -22,23 +22,83 @@
  */
 package org.apache.hive.beeline;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import java.io.File;
-import java.io.FileWriter;
 import java.io.IOException;
-import java.io.PrintWriter;
+import java.io.PrintStream;
 
 public class OutputFile {
-  final File file;
-  final PrintWriter out;
+  private final PrintStream out;
+  private final String filename;
 
   public OutputFile(String filename) throws IOException {
-file = new File(filename);
-out = new PrintWriter(new FileWriter(file));
+File file = new File(filename);
+this.filename = file.getAbsolutePath();
+this.out = new PrintStream(file, "UTF-8");
+  }
+
+  @VisibleForTesting
+  protected PrintStream getOut() {
+return out;
+  }
+
+  @VisibleForTesting
+  protected String getFilename() {
+return filename;
+  }
+
+  /**
+   * Constructor used by the decorating classes in tests.
+   * @param out The output stream
+   * @param filename The filename, to use in the toString() method
+   */
+  @VisibleForTesting
+  protected OutputFile(PrintStream out, String filename) {
+this.out = out;
+this.filename = filename;
+  }
+
+  /**
+   * Returns true if a FetchConverter is defined for writing the results. 
Should be used only for
+   * testing, otherwise returns false.
+   * @return True if a FetchConverter is active
+   */
+  boolean isActiveConverter() {
+return false;
+  }
+
+  /**
+   * Indicates that result fetching is started, and the converter should be 
activated. The
+   * Converter starts to collect the data when the fetch is started, and 
prints out
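
A minimal sketch of the collect-then-emit behavior the OutputFile javadoc describes for test mode: rows are buffered once fetching starts and written out, sorted, when fetching finishes, so q.out result ordering becomes deterministic. The class below is illustrative only and is not the ConvertedOutputFile added by the commit:

import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SortingFetchConverterSketch {
  private final PrintStream out;
  private final List<String> buffered = new ArrayList<>();
  private boolean collecting = false;

  public SortingFetchConverterSketch(PrintStream out) {
    this.out = out;
  }

  void fetchStarted() {
    // From here on, result rows are collected instead of printed immediately.
    collecting = true;
  }

  void addLine(String line) {
    if (collecting) {
      buffered.add(line);
    } else {
      out.println(line);
    }
  }

  void fetchFinished() {
    // Emit the collected rows in sorted order, then go back to pass-through printing.
    Collections.sort(buffered);
    buffered.forEach(out::println);
    buffered.clear();
    collecting = false;
  }

  public static void main(String[] args) {
    SortingFetchConverterSketch c = new SortingFetchConverterSketch(System.out);
    c.fetchStarted();
    c.addLine("2\tb");
    c.addLine("1\ta");
    c.fetchFinished();   // prints the rows sorted: 1\ta then 2\tb
  }
}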

[09/11] hive git commit: HIVE-16146: If possible find a better way to filter the TestBeeLineDriver output(Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)

2017-04-18 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out 
b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
index 70a37ca..c943b03 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
@@ -1,948 +1,490 @@
->>>  set hive.strict.checks.bucketing=false;
-No rows affected 
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  create table smb_bucket_1(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table 
smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table 
smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table smb_bucket_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_1
-INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_1
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_bucket_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_1
-INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_1
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create table smb_bucket_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-No rows affected 
->>>  create table smb_bucket_2(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table 
smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table 
smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table smb_bucket_2(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_1
-INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_2
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_bucket_2(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_1
-INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_2
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create table smb_bucket_2(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-No rows affected 
->>>  create table smb_bucket_3(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table 
smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table 
smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table smb_bucket_3(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_1
-INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_3
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_bucket_3(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_1
-INFO  : POSTHOOK: Output: smb_mapjoin_1

[07/11] hive git commit: HIVE-16146: If possible find a better way to filter the TestBeeLineDriver output(Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)

2017-04-18 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
--
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out 
b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
index 19c07a0..b53e670 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
@@ -1,2563 +1,2161 @@
->>>  set hive.mapred.mode=nonstrict;
-No rows affected 
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  set hive.input.format = 
org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-No rows affected 
->>>  
->>>  set hive.cbo.enable=false;
-No rows affected 
->>>  
->>>  set hive.exec.reducers.max = 1;
-No rows affected 
->>>  set hive.merge.mapfiles=false;
-No rows affected 
->>>  set hive.merge.mapredfiles=false; 
-No rows affected 
->>>  
->>>  -- This test verifies that the output of a sort merge join on 2 
partitions (one on each side of the join) is bucketed
->>>  
->>>  -- Create two bucketed and sorted tables
->>>  CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds 
STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 
(key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY 
(key) INTO 16 BUCKETS
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 
(key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY 
(key) INTO 16 BUCKETS
-INFO  : PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_11
-INFO  : PREHOOK: Output: smb_mapjoin_11@test_table1
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_11
-INFO  : POSTHOOK: Output: smb_mapjoin_11@test_table1
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query CREATE TABLE test_table1 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
-No rows affected 
->>>  CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds 
STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 
(key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY 
(key) INTO 16 BUCKETS
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 
(key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY 
(key) INTO 16 BUCKETS
-INFO  : PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_11
-INFO  : PREHOOK: Output: smb_mapjoin_11@test_table2
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_11
-INFO  : POSTHOOK: Output: smb_mapjoin_11@test_table2
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query CREATE TABLE test_table2 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
-No rows affected 
->>>  
->>>  FROM default.src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): FROM default.src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_col0, 
type:int, comment:nu

[11/11] hive git commit: HIVE-16146: If possible find a better way to filter the TestBeeLineDriver output(Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)

2017-04-18 Thread kgyrtkirk
HIVE-16146: If possible find a better way to filter the TestBeeLineDriver 
output(Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2509e2fa
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2509e2fa
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2509e2fa

Branch: refs/heads/master
Commit: 2509e2fa735b0b88a992acfef703c36936b09b23
Parents: 0e24943
Author: Peter Vary 
Authored: Tue Apr 18 07:32:11 2017 +0200
Committer: Zoltan Haindrich 
Committed: Tue Apr 18 08:15:26 2017 +0200

--
 .../java/org/apache/hive/beeline/BeeLine.java   |   19 +
 .../java/org/apache/hive/beeline/Commands.java  |   17 +-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |7 +
 .../test/resources/testconfiguration.properties |4 +-
 .../java/org/apache/hive/beeline/QFile.java |  333 ++
 .../apache/hive/beeline/QFileBeeLineClient.java |  156 +
 .../org/apache/hive/beeline/package-info.java   |   22 +
 .../org/apache/hive/beeline/qfile/QFile.java|  336 --
 .../hive/beeline/qfile/QFileBeeLineClient.java  |  144 -
 .../apache/hive/beeline/qfile/package-info.java |   22 -
 .../hadoop/hive/ql/exec/mr/ExecDriver.java  |2 +
 .../hive/ql/log/LogDivertAppenderForTest.java   |  182 +
 .../hadoop/hive/ql/session/OperationLog.java|   59 +-
 .../beeline/drop_with_concurrency.q.out |   83 +-
 .../beeline/escape_comments.q.out   |  609 +--
 .../beeline/select_dummy_source.q.out   |  251 +
 .../clientpositive/beeline/smb_mapjoin_1.q.out  | 1406 ++
 .../clientpositive/beeline/smb_mapjoin_10.q.out |  331 +-
 .../clientpositive/beeline/smb_mapjoin_11.q.out | 4712 --
 .../clientpositive/beeline/smb_mapjoin_12.q.out | 1220 ++---
 .../clientpositive/beeline/smb_mapjoin_13.q.out | 1051 ++--
 .../clientpositive/beeline/smb_mapjoin_16.q.out |  338 +-
 .../clientpositive/beeline/smb_mapjoin_2.q.out  | 1421 ++
 .../clientpositive/beeline/smb_mapjoin_3.q.out  | 1412 ++
 .../clientpositive/beeline/smb_mapjoin_7.q.out  | 3053 +---
 .../service/cli/operation/OperationManager.java |2 +
 .../service/cli/session/HiveSessionImpl.java|3 +-
 27 files changed, 7096 insertions(+), 10099 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/beeline/src/java/org/apache/hive/beeline/BeeLine.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java 
b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 27b353c..a589f33 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -151,6 +151,10 @@ public class BeeLine implements Closeable {
   // Indicates if this instance of beeline is running in compatibility mode, 
or beeline mode
   private boolean isBeeLine = true;
 
+  // Indicates that we are in test mode.
+  // Print only the errors, the operation log and the query results.
+  private boolean isTestMode = false;
+
   private static final Options options = new Options();
 
   public static final String BEELINE_DEFAULT_JDBC_DRIVER = 
"org.apache.hive.jdbc.HiveDriver";
@@ -2438,4 +2442,19 @@ public class BeeLine implements Closeable {
   public void setCurrentDatabase(String currentDatabase) {
 this.currentDatabase = currentDatabase;
   }
+
+  /**
+   * Setting the BeeLine into test mode.
+   * Print only the errors, the operation log and the query results.
+   * Should be used only by tests.
+   *
+   * @param isTestMode
+   */
+  void setIsTestMode(boolean isTestMode) {
+this.isTestMode = isTestMode;
+  }
+
+  boolean isTestMode() {
+return isTestMode;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/beeline/src/java/org/apache/hive/beeline/Commands.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java 
b/beeline/src/java/org/apache/hive/beeline/Commands.java
index d179b37..08d53ca 100644
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -977,7 +977,8 @@ public class Commands {
   hasResults = ((CallableStatement) stmnt).execute();
 } else {
   stmnt = beeLine.createStatement();
-  if (beeLine.getOpts().isSilent()) {
+  // In test mode we want the operation logs regardless of the settings
+  if (!beeLine.isTestMode() && beeLine.getOpts().isSilent()) {
 hasResults = stmnt.execute(sql);
   } else {
 InPlaceUpdateStream.EventNotifier eventNotifier =
@@ -1341,7 +1342,12 @@ public class Commands {
   try {

[02/11] hive git commit: HIVE-16146: If possible find a better way to filter the TestBeeLineDriver output(Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)

2017-04-18 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
--
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out 
b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
index b15c951..82f5804 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
@@ -1,1805 +1,1268 @@
->>>  set hive.strict.checks.bucketing=false;
-No rows affected 
->>>  
->>>  set hive.mapred.mode=nonstrict;
-No rows affected 
->>>  set hive.exec.reducers.max = 1;
-No rows affected 
->>>  
->>>  
->>>  CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 2 BUCKETS;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE 
smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 
BUCKETS
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE 
smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 
BUCKETS
-INFO  : PREHOOK: query: CREATE TABLE smb_bucket4_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_7
-INFO  : PREHOOK: Output: smb_mapjoin_7@smb_bucket4_1
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: CREATE TABLE smb_bucket4_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_7
-INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_bucket4_1
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query CREATE TABLE smb_bucket4_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
-No rows affected 
->>>  
->>>  
->>>  CREATE TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 2 BUCKETS;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE 
smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 
BUCKETS
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE 
smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 
BUCKETS
-INFO  : PREHOOK: query: CREATE TABLE smb_bucket4_2(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_7
-INFO  : PREHOOK: Output: smb_mapjoin_7@smb_bucket4_2
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: CREATE TABLE smb_bucket4_2(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_7
-INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_bucket4_2
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query CREATE TABLE smb_bucket4_2(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
-No rows affected 
->>>  
->>>  
->>>  
->>>  
->>>  create table smb_join_results(k1 int, v1 string, k2 int, v2 string);
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table 
smb_join_results(k1 int, v1 string, k2 int, v2 string)
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table 
smb_join_results(k1 int, v1 string, k2 int, v2 string)
-INFO  : PREHOOK: query: create table smb_join_results(k1 int, v1 string, k2 
int, v2 string)
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_7
-INFO  : PREHOOK: Output: smb_mapjoin_7@smb_join_results
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_join_results(k1 int, v1 string, k2 
int, v2 string)
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_7
-INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_join_results
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create table smb_join_results(k1 int, v1 string, 
k2 int, v2 string)
-No rows affected 
->>>  create table smb_join_results_empty_big

[05/11] hive git commit: HIVE-16146: If possible find a better way to filter the TestBeeLineDriver output(Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)

2017-04-18 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out
--
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out 
b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out
index d303900..49ff635 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out
@@ -1,687 +1,388 @@
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  set hive.input.format = 
org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-No rows affected 
->>>  set hive.cbo.enable=false;
-No rows affected 
->>>  
->>>  set hive.exec.reducers.max = 1;
-No rows affected 
->>>  set hive.merge.mapfiles=false;
-No rows affected 
->>>  set hive.merge.mapredfiles=false; 
-No rows affected 
->>>  
->>>  -- This test verifies that the sort merge join optimizer works when the 
tables are joined on columns with different names
->>>  
->>>  -- Create bucketed and sorted tables
->>>  CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) 
SORTED BY (key ASC) INTO 16 BUCKETS;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 
(key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 
(key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS
-INFO  : PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) 
CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_13
-INFO  : PREHOOK: Output: smb_mapjoin_13@test_table1
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) 
CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_13
-INFO  : POSTHOOK: Output: smb_mapjoin_13@test_table1
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query CREATE TABLE test_table1 (key INT, value STRING) 
CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS
-No rows affected 
->>>  CREATE TABLE test_table2 (value INT, key STRING) CLUSTERED BY (value) 
SORTED BY (value ASC) INTO 16 BUCKETS;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 
(value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 
BUCKETS
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 
(value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 
BUCKETS
-INFO  : PREHOOK: query: CREATE TABLE test_table2 (value INT, key STRING) 
CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_13
-INFO  : PREHOOK: Output: smb_mapjoin_13@test_table2
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: CREATE TABLE test_table2 (value INT, key STRING) 
CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_13
-INFO  : POSTHOOK: Output: smb_mapjoin_13@test_table2
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query CREATE TABLE test_table2 (value INT, key STRING) 
CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS
-No rows affected 
->>>  CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key, 
value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table3 
(key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value 
ASC) INTO 16 BUCKETS
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table3 
(key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value 
ASC) INTO 16 BUCKETS
-INFO  : PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) 
CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) I

[08/11] hive git commit: HIVE-16146: If possible find a better way to filter the TestBeeLineDriver output(Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)

2017-04-18 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/ql/src/test/results/clientpositive/beeline/smb_mapjoin_10.q.out
--
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_10.q.out 
b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_10.q.out
index bdfaefb..50706f4 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_10.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_10.q.out
@@ -1,248 +1,107 @@
->>>  set hive.strict.checks.bucketing=false;
-No rows affected 
->>>  
->>>  
->>>  create table tmp_smb_bucket_10(userid int, pageid int, postid int, type 
string) partitioned by (ds string) CLUSTERED BY (userid) SORTED BY (pageid, 
postid, type, userid) INTO 2 BUCKETS STORED AS RCFILE; 
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table 
tmp_smb_bucket_10(userid int, pageid int, postid int, type string) partitioned 
by (ds string) CLUSTERED BY (userid) SORTED BY (pageid, postid, type, userid) 
INTO 2 BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table 
tmp_smb_bucket_10(userid int, pageid int, postid int, type string) partitioned 
by (ds string) CLUSTERED BY (userid) SORTED BY (pageid, postid, type, userid) 
INTO 2 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table tmp_smb_bucket_10(userid int, pageid int, 
postid int, type string) partitioned by (ds string) CLUSTERED BY (userid) 
SORTED BY (pageid, postid, type, userid) INTO 2 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_10
-INFO  : PREHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table tmp_smb_bucket_10(userid int, pageid 
int, postid int, type string) partitioned by (ds string) CLUSTERED BY (userid) 
SORTED BY (pageid, postid, type, userid) INTO 2 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_10
-INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create table tmp_smb_bucket_10(userid int, pageid 
int, postid int, type string) partitioned by (ds string) CLUSTERED BY (userid) 
SORTED BY (pageid, postid, type, userid) INTO 2 BUCKETS STORED AS RCFILE
-No rows affected 
->>>  
->>>  alter table tmp_smb_bucket_10 add partition (ds = '1');
-INFO  : Compiling commandqueryId=(!!{queryId}!!): alter table 
tmp_smb_bucket_10 add partition (ds = '1')
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): alter table 
tmp_smb_bucket_10 add partition (ds = '1')
-INFO  : PREHOOK: query: alter table tmp_smb_bucket_10 add partition (ds = '1')
-INFO  : PREHOOK: type: ALTERTABLE_ADDPARTS
-INFO  : PREHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: alter table tmp_smb_bucket_10 add partition (ds = '1')
-INFO  : POSTHOOK: type: ALTERTABLE_ADDPARTS
-INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10
-INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10@ds=1
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query alter table tmp_smb_bucket_10 add partition (ds = 
'1')
-No rows affected 
->>>  alter table tmp_smb_bucket_10 add partition (ds = '2');
-INFO  : Compiling commandqueryId=(!!{queryId}!!): alter table 
tmp_smb_bucket_10 add partition (ds = '2')
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): alter table 
tmp_smb_bucket_10 add partition (ds = '2')
-INFO  : PREHOOK: query: alter table tmp_smb_bucket_10 add partition (ds = '2')
-INFO  : PREHOOK: type: ALTERTABLE_ADDPARTS
-INFO  : PREHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: alter table tmp_smb_bucket_10 add partition (ds = '2')
-INFO  : POSTHOOK: type: ALTERTABLE_ADDPARTS
-INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10
-INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10@ds=2
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query alter table tmp_smb

[03/11] hive git commit: HIVE-16146: If possible find a better way to filter the TestBeeLineDriver output(Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)

2017-04-18 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
--
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out 
b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
index 6c9b8e4..f639ba4 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
@@ -1,950 +1,494 @@
->>>  set hive.cbo.enable=false;
-No rows affected 
->>>  set hive.strict.checks.bucketing=false;
-No rows affected 
->>>  
->>>  -- SORT_QUERY_RESULTS
->>>  
->>>  
->>>  
->>>  create table smb_bucket_1(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table 
smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table 
smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table smb_bucket_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_3
-INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_1
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_bucket_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_3
-INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_1
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create table smb_bucket_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-No rows affected 
->>>  create table smb_bucket_2(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table 
smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table 
smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table smb_bucket_2(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_3
-INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_2
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_bucket_2(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_3
-INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_2
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create table smb_bucket_2(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-No rows affected 
->>>  create table smb_bucket_3(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table 
smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table 
smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table smb_bucket_3(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_3
-INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_3
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_bucket_3(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK

[10/11] hive git commit: HIVE-16146: If possible find a better way to filter the TestBeeLineDriver output(Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)

2017-04-18 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
--
diff --git a/ql/src/test/results/clientpositive/beeline/escape_comments.q.out 
b/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
index b440d9c..07fef57 100644
--- a/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
+++ b/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
@@ -1,406 +1,217 @@
->>>  create database escape_comments_db comment 'a\nb';
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create database 
escape_comments_db comment 'a\nb'
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create database 
escape_comments_db comment 'a\nb'
-INFO  : PREHOOK: query: create database escape_comments_db comment 'a\nb'
-INFO  : PREHOOK: type: CREATEDATABASE
-INFO  : PREHOOK: Output: database:escape_comments_db
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create database escape_comments_db comment 'a\nb'
-INFO  : POSTHOOK: type: CREATEDATABASE
-INFO  : POSTHOOK: Output: database:escape_comments_db
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create database escape_comments_db comment 'a\nb'
-No rows affected 
->>>  use escape_comments_db;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): use escape_comments_db
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): use escape_comments_db
-INFO  : PREHOOK: query: use escape_comments_db
-INFO  : PREHOOK: type: SWITCHDATABASE
-INFO  : PREHOOK: Input: database:escape_comments_db
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: use escape_comments_db
-INFO  : POSTHOOK: type: SWITCHDATABASE
-INFO  : POSTHOOK: Input: database:escape_comments_db
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query use escape_comments_db
-No rows affected 
->>>  create table escape_comments_tbl1
-(col1 string comment 'a\nb\'\;') comment 'a\nb'
-partitioned by (p1 string comment 'a\nb');
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table 
escape_comments_tbl1
-(col1 string comment 'a\nb\'\;') comment 'a\nb'
-partitioned by (p1 string comment 'a\nb')
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table 
escape_comments_tbl1
-(col1 string comment 'a\nb\'\;') comment 'a\nb'
-partitioned by (p1 string comment 'a\nb')
-INFO  : PREHOOK: query: create table escape_comments_tbl1
+PREHOOK: query: create database escape_comments_db comment 'a\nb'
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:escape_comments_db
+POSTHOOK: query: create database escape_comments_db comment 'a\nb'
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:escape_comments_db
+PREHOOK: query: use escape_comments_db
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:escape_comments_db
+POSTHOOK: query: use escape_comments_db
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:escape_comments_db
+PREHOOK: query: create table escape_comments_tbl1
 (col1 string comment 'a\nb\'\;') comment 'a\nb'
 partitioned by (p1 string comment 'a\nb')
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:escape_comments_db
-INFO  : PREHOOK: Output: escape_comments_db@escape_comments_tbl1
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table escape_comments_tbl1
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:escape_comments_db
+PREHOOK: Output: escape_comments_db@escape_comments_tbl1
+POSTHOOK: query: create table escape_comments_tbl1
 (col1 string comment 'a\nb\'\;') comment 'a\nb'
 partitioned by (p1 string comment 'a\nb')
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:escape_comments_db
-INFO  : POSTHOOK: Output: escape_comments_db@escape_comments_tbl1
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create table escape_comments_tbl1
-(col1 string comment 'a\nb\'\;') comment 'a\nb'
-partitioned by (p1 string comment 'a\nb')
-No rows affected 
->>>  create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb'
-as select col1 from escape_comm

[04/11] hive git commit: HIVE-16146: If possible find a better way to filter the TestBeeLineDriver output(Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)

2017-04-18 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
--
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out 
b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
index 22a2d6a..1ea6553 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
@@ -1,955 +1,498 @@
->>>  set hive.strict.checks.bucketing=false;
-No rows affected 
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  create table smb_bucket_1(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table 
smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table 
smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table smb_bucket_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_2
-INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_1
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_bucket_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_2
-INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_1
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create table smb_bucket_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-No rows affected 
->>>  create table smb_bucket_2(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table 
smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table 
smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table smb_bucket_2(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_2
-INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_2
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_bucket_2(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_2
-INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_2
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create table smb_bucket_2(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-No rows affected 
->>>  create table smb_bucket_3(key int, value string) CLUSTERED BY (key) 
SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table 
smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table 
smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 
BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table smb_bucket_3(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_2
-INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_3
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_bucket_3(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_2
-INFO  : POSTHOOK: Output: smb_mapjoin_2

[01/11] hive git commit: HIVE-16146: If possible find a better way to filter the TestBeeLineDriver output(Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)

2017-04-18 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 0e2494356 -> 2509e2fa7


http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
--
diff --git 
a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java 
b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
index f62ee4e..ac64ab2 100644
--- 
a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
+++ 
b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Schema;
 import org.apache.hadoop.hive.ql.log.LogDivertAppender;
+import org.apache.hadoop.hive.ql.log.LogDivertAppenderForTest;
 import org.apache.hadoop.hive.ql.session.OperationLog;
 import org.apache.hive.service.AbstractService;
 import org.apache.hive.service.cli.FetchOrientation;
@@ -71,6 +72,7 @@ public class OperationManager extends AbstractService {
   @Override
   public synchronized void init(HiveConf hiveConf) {
 LogDivertAppender.registerRoutingAppender(hiveConf);
+LogDivertAppenderForTest.registerRoutingAppenderIfInTest(hiveConf);
 
 if (hiveConf.isWebUiQueryInfoCacheEnabled()) {
   historicSqlOperations = new SQLOperationDisplayCache(

http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
--
diff --git 
a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java 
b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
index 7df4563..f5d7ec0 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
@@ -777,7 +777,8 @@ public class HiveSessionImpl implements HiveSession {
   }
 
   private void cleanupSessionLogDir() {
-if (isOperationLogEnabled) {
+// In a test run we might not want to remove the log directory
+if (isOperationLogEnabled && 
sessionConf.getBoolVar(ConfVars.HIVE_IN_TEST_REMOVE_LOGS)) {
   try {
 FileUtils.forceDelete(sessionLogDir);
 LOG.info("Operation log session directory is deleted: "
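
A hypothetical configuration sketch (the KeepLogsExample class is invented; HiveConf and HIVE_IN_TEST_REMOVE_LOGS are taken from this patch): turning the new flag off so that cleanupSessionLogDir() leaves the operation log directory in place for later inspection.

import org.apache.hadoop.hive.conf.HiveConf;

public class KeepLogsExample {
  static HiveConf confThatKeepsOperationLogs() {
    HiveConf conf = new HiveConf();
    // When false, the guard above skips FileUtils.forceDelete(sessionLogDir)
    // even though operation logging is enabled.
    conf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST_REMOVE_LOGS, false);
    return conf;
  }
}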



[06/11] hive git commit: HIVE-16146: If possible find a better way to filter the TestBeeLineDriver output(Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)

2017-04-18 Thread kgyrtkirk
http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out
--
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out 
b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out
index 98bf25e..9928a60 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out
@@ -1,822 +1,430 @@
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  set hive.input.format = 
org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-No rows affected 
->>>  
->>>  
->>>  set hive.exec.reducers.max = 1;
-No rows affected 
->>>  set hive.merge.mapfiles=false;
-No rows affected 
->>>  set hive.merge.mapredfiles=false; 
-No rows affected 
->>>  set hive.cbo.enable=false;
-No rows affected 
->>>  -- This test verifies that the output of a sort merge join on 1 big 
partition with multiple small partitions is bucketed and sorted
->>>  
->>>  -- Create two bucketed and sorted tables
->>>  CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds 
STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 
(key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY 
(key) INTO 16 BUCKETS
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 
(key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY 
(key) INTO 16 BUCKETS
-INFO  : PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_12
-INFO  : PREHOOK: Output: smb_mapjoin_12@test_table1
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_12
-INFO  : POSTHOOK: Output: smb_mapjoin_12@test_table1
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query CREATE TABLE test_table1 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
-No rows affected 
->>>  CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds 
STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 
(key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY 
(key) INTO 16 BUCKETS
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 
(key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY 
(key) INTO 16 BUCKETS
-INFO  : PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_12
-INFO  : PREHOOK: Output: smb_mapjoin_12@test_table2
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_12
-INFO  : POSTHOOK: Output: smb_mapjoin_12@test_table2
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: 
!!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query CREATE TABLE test_table2 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
-No rows affected 
->>>  
->>>  FROM default.src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT *
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') SELECT *;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): FROM default.src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2'

hive git commit: HIVE-16356: Table#validateColumns should avoid checking exhaustively for matches in a list (Janos Gub via Zoltan Haindrich)

2017-04-17 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master aaa67fa63 -> 0e2494356


HIVE-16356: Table#validateColumns should avoid checking exhaustively for 
matches in a list (Janos Gub via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0e249435
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0e249435
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0e249435

Branch: refs/heads/master
Commit: 0e24943568f9db85aed5d2e618aebad617669932
Parents: aaa67fa
Author: Janos Gub 
Authored: Tue Apr 18 07:03:17 2017 +0200
Committer: Zoltan Haindrich 
Committed: Tue Apr 18 07:04:19 2017 +0200

--
 ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0e249435/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 171f944..3122689 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -22,10 +22,12 @@ import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.Set;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -972,7 +974,7 @@ public class Table implements Serializable {
 
  public static void validateColumns(List<FieldSchema> columns, List<FieldSchema> partCols)
  throws HiveException {
-List<String> colNames = new ArrayList<String>();
+Set<String> colNames = new HashSet<>();
 for (FieldSchema partCol: columns) {
   String colName = normalize(partCol.getName());
   if (colNames.contains(colName)) {
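
An illustrative sketch of the point behind this change, not code from the patch (DuplicateColumnCheck and findDuplicate are invented names): with a HashSet each duplicate check is a constant-time lookup, whereas List.contains() rescans the whole list for every column.

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DuplicateColumnCheck {
  // Returns the first name that occurs twice, or null if all names are distinct.
  static String findDuplicate(List<String> normalizedNames) {
    Set<String> seen = new HashSet<>();
    for (String name : normalizedNames) {
      if (!seen.add(name)) {  // add() returns false when the name was already seen
        return name;
      }
    }
    return null;
  }
}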



hive git commit: HIVE-15833: Add unit tests for org.json usage on branch-1 (Daniel Voros via Zoltan Haindrich)

2017-03-31 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/branch-1 1a9ed419e -> f8db95803


HIVE-15833: Add unit tests for org.json usage on branch-1 (Daniel Voros via 
Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f8db9580
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f8db9580
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f8db9580

Branch: refs/heads/branch-1
Commit: f8db9580337e3d01058b8bdcc312c38ed046fbd6
Parents: 1a9ed41
Author: Daniel Voros 
Authored: Fri Mar 31 08:53:08 2017 +0200
Committer: Zoltan Haindrich 
Committed: Fri Mar 31 09:00:42 2017 +0200

--
 .../hadoop/hive/common/jsonexplain/tez/Op.java  |   4 +-
 .../hive/common/jsonexplain/tez/TestOp.java |  58 
 .../hive/common/jsonexplain/tez/TestStage.java  | 195 +
 .../jsonexplain/tez/TestTezJsonParser.java  |  54 
 .../hive/common/jsonexplain/tez/TestVertex.java | 106 +++
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |  16 +-
 .../apache/hadoop/hive/ql/hooks/ATSHook.java|   4 +-
 .../apache/hadoop/hive/ql/parse/EximUtil.java   |   4 +-
 .../hadoop/hive/ql/exec/TestExplainTask.java| 281 +++
 .../hadoop/hive/ql/hooks/TestATSHook.java   |  53 
 .../hadoop/hive/ql/parse/TestEximUtil.java  | 103 +++
 11 files changed, 870 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f8db9580/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
--
diff --git 
a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java 
b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
index fb12f70..cc4947f 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
@@ -25,6 +25,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.json.JSONException;
 import org.json.JSONObject;
 
@@ -54,7 +55,8 @@ public class Op {
 this.vertex = vertex;
   }
 
-  private void inlineJoinOp() throws Exception {
+  @VisibleForTesting
+  void inlineJoinOp() throws Exception {
 // inline map join operator
 if (this.name.equals("Map Join Operator")) {
   JSONObject mapjoinObj = opObject.getJSONObject("Map Join Operator");

http://git-wip-us.apache.org/repos/asf/hive/blob/f8db9580/common/src/test/org/apache/hadoop/hive/common/jsonexplain/tez/TestOp.java
--
diff --git 
a/common/src/test/org/apache/hadoop/hive/common/jsonexplain/tez/TestOp.java 
b/common/src/test/org/apache/hadoop/hive/common/jsonexplain/tez/TestOp.java
new file mode 100644
index 000..fc8381b
--- /dev/null
+++ b/common/src/test/org/apache/hadoop/hive/common/jsonexplain/tez/TestOp.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common.jsonexplain.tez;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.json.JSONObject;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestOp {
+
+  private ObjectMapper objectMapper = new ObjectMapper();
+
+  @Test
+  public void testInlineJoinOpJsonShouldMatch() throws Exception {
+String jsonString = "{\"Map Join Operator\":{" +
+"\"input vertices:\":{\"a\":\"AVERTEX\"}," +
+"\"keys:\":{\"a\":\"AKEY\",\"b\":\"BKEY\"}}}";
+JSONObject mapJoin = new JSONObject(jsonString);
+
+Vertex vertex = new Vertex("vertex-name", null);
+
+List attrs = new ArrayList<>();
+
+Op uut = new Op("Map Join Operator", "op-id", "output-vertex-name", 
Collections.EMPTY_LIST,
+attrs, mapJoin, vertex);
+uut

hive git commit: HIVE-16256: Flaky test: TestCliDriver.testCliDriver[comments] (Barna Zsombor Klara via Zoltan Haindrich)

2017-03-30 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 89c02dd38 -> 79d51230e


HIVE-16256: Flaky test: TestCliDriver.testCliDriver[comments] (Barna Zsombor 
Klara via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/79d51230
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/79d51230
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/79d51230

Branch: refs/heads/master
Commit: 79d51230e1668c79570b58045b0f67f0c91791e5
Parents: 89c02dd
Author: Barna Zsombor Klara 
Authored: Fri Mar 31 08:38:25 2017 +0200
Committer: Zoltan Haindrich 
Committed: Fri Mar 31 08:39:16 2017 +0200

--
 .../test/queries/clientpositive/updateAccessTime.q  |  5 +
 .../results/clientpositive/updateAccessTime.q.out   | 16 
 2 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/79d51230/ql/src/test/queries/clientpositive/updateAccessTime.q
--
diff --git a/ql/src/test/queries/clientpositive/updateAccessTime.q 
b/ql/src/test/queries/clientpositive/updateAccessTime.q
index c65ad42..36d3f5e 100644
--- a/ql/src/test/queries/clientpositive/updateAccessTime.q
+++ b/ql/src/test/queries/clientpositive/updateAccessTime.q
@@ -29,3 +29,8 @@ desc extended tstsrcpart partition (ds='2008-04-08', hr='11');
 desc extended tstsrcpart partition (ds='2008-04-08', hr='12');
 
 drop table tstsrcpart;
+
+set hive.exec.pre.hooks = org.apache.hadoop.hive.ql.hooks.PreExecutePrinter;
+
+ANALYZE TABLE src COMPUTE STATISTICS;
+ANALYZE TABLE src COMPUTE STATISTICS FOR COLUMNS key,value;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/79d51230/ql/src/test/results/clientpositive/updateAccessTime.q.out
--
diff --git a/ql/src/test/results/clientpositive/updateAccessTime.q.out 
b/ql/src/test/results/clientpositive/updateAccessTime.q.out
index d7e6651..2dcd930 100644
--- a/ql/src/test/results/clientpositive/updateAccessTime.q.out
+++ b/ql/src/test/results/clientpositive/updateAccessTime.q.out
@@ -215,3 +215,19 @@ POSTHOOK: query: drop table tstsrcpart
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@tstsrcpart
 POSTHOOK: Output: default@tstsrcpart
+PREHOOK: query: ANALYZE TABLE src COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src
+POSTHOOK: query: ANALYZE TABLE src COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src
+PREHOOK: query: ANALYZE TABLE src COMPUTE STATISTICS FOR COLUMNS key,value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
+POSTHOOK: query: ANALYZE TABLE src COMPUTE STATISTICS FOR COLUMNS key,value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 



hive git commit: HIVE-16178: corr/covar_samp UDAF standard compliance (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-03-21 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 3c1dfe379 -> d97e4874d


HIVE-16178: corr/covar_samp UDAF standard compliance (Zoltan Haindrich, 
reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d97e4874
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d97e4874
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d97e4874

Branch: refs/heads/master
Commit: d97e4874dc0e09c535e8cb908f6b17698e49a5d6
Parents: 3c1dfe3
Author: Zoltan Haindrich 
Authored: Tue Mar 21 07:37:28 2017 +0100
Committer: Zoltan Haindrich 
Committed: Tue Mar 21 18:08:41 2017 +0100

--
 .../generic/GenericUDAFBinarySetFunctions.java  | 28 +++---
 .../ql/udf/generic/GenericUDAFCorrelation.java  | 26 ++
 .../generic/GenericUDAFCovarianceSample.java| 18 +++
 .../TestGenericUDAFBinarySetFunctions.java  |  6 +--
 .../queries/clientpositive/cbo_rp_windowing_2.q |  2 +-
 .../clientpositive/udaf_binarysetfunctions.q|  1 +
 ql/src/test/queries/clientpositive/windowing.q  |  2 +-
 .../llap/cbo_rp_windowing_2.q.out   | 54 ++--
 .../results/clientpositive/llap/windowing.q.out | 54 ++--
 .../clientpositive/spark/windowing.q.out| 54 ++--
 .../udaf_binarysetfunctions.q.out   | 10 ++--
 .../test/results/clientpositive/udaf_corr.q.out | 13 +++--
 .../clientpositive/udaf_covar_samp.q.out| 16 +++---
 13 files changed, 142 insertions(+), 142 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d97e4874/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBinarySetFunctions.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBinarySetFunctions.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBinarySetFunctions.java
index e799a94..674c527 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBinarySetFunctions.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBinarySetFunctions.java
@@ -294,19 +294,16 @@ public class GenericUDAFBinarySetFunctions extends 
AbstractGenericUDAFResolver {
   return new Evaluator();
 }
 
-/**
- * NOTE: corr is declared as corr(x,y) instead corr(y,x)
- */
 private static class Evaluator extends GenericUDAFCorrelationEvaluator {
 
   @Override
   public Object terminate(AggregationBuffer agg) throws HiveException {
 StdAgg myagg = (StdAgg) agg;
 
-if (myagg.count < 2 || myagg.yvar == 0.0d) {
+if (myagg.count < 2 || myagg.xvar == 0.0d) {
   return null;
 } else {
-  getResult().set(myagg.covar / myagg.yvar);
+  getResult().set(myagg.covar / myagg.xvar);
   return getResult();
 }
   }
@@ -328,23 +325,20 @@ public class GenericUDAFBinarySetFunctions extends 
AbstractGenericUDAFResolver {
   return new Evaluator();
 }
 
-/**
- * NOTE: corr is declared as corr(x,y) instead corr(y,x)
- */
 private static class Evaluator extends GenericUDAFCorrelationEvaluator {
 
   @Override
   public Object terminate(AggregationBuffer agg) throws HiveException {
 StdAgg myagg = (StdAgg) agg;
 
-if (myagg.count < 2 || myagg.yvar == 0.0d) {
+if (myagg.count < 2 || myagg.xvar == 0.0d) {
   return null;
 }
 DoubleWritable result = getResult();
-if (myagg.xvar == 0.0d) {
+if (myagg.yvar == 0.0d) {
   result.set(1.0d);
 } else {
-  result.set(myagg.covar * myagg.covar / myagg.yvar / myagg.xvar);
+  result.set(myagg.covar * myagg.covar / myagg.xvar / myagg.yvar);
 }
 return result;
   }
@@ -365,9 +359,6 @@ public class GenericUDAFBinarySetFunctions extends 
AbstractGenericUDAFResolver {
   return new Evaluator();
 }
 
-/**
- * NOTE: corr is declared as corr(x,y) instead corr(y,x)
- */
 private static class Evaluator extends GenericUDAFCorrelationEvaluator {
 
   @Override
@@ -398,21 +389,18 @@ public class GenericUDAFBinarySetFunctions extends 
AbstractGenericUDAFResolver {
   return new Evaluator();
 }
 
-/**
- * NOTE: corr is declared as corr(x,y) instead corr(y,x)
- */
 private static class Evaluator extends GenericUDAFCorrelationEvaluator {
 
   @Override
   public Object terminate(AggregationBuffer agg) throws HiveException {
 StdAgg myagg = (StdAgg) agg;
 
-if (myagg.count == 0) {
+if (myagg.count == 0 || myagg.xvar == 0.0d) {
   return null;
 }
 DoubleWritable result = getResult();
-double slope = myagg.covar / myagg.yvar;
-result.set(myagg
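
For reference, a sketch of the standard definitions the corrected evaluators follow (written out here for clarity, not quoted from the patch). With Sxx, Syy and Sxy denoting the centred sums of squares and cross-products of the two columns:

  \mathrm{regr\_slope}(y,x) = \frac{S_{xy}}{S_{xx}}
  \qquad
  \mathrm{regr\_r2}(y,x) = \frac{S_{xy}^{2}}{S_{xx}\,S_{yy}}
  \quad (\text{null when } S_{xx} = 0,\ 1 \text{ when } S_{yy} = 0)

so the divisor involves the spread of the independent variable x (xvar in the evaluators), which is exactly what these hunks correct.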

hive git commit: HIVE-15978: Support regr_* functions (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

2017-03-16 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 4cb87670e -> 0e62d3dcb


HIVE-15978: Support regr_* functions (Zoltan Haindrich, reviewed by Ashutosh 
Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0e62d3dc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0e62d3dc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0e62d3dc

Branch: refs/heads/master
Commit: 0e62d3dcb9e7945f140fc17fe8eca628579d5385
Parents: 4cb8767
Author: Zoltan Haindrich 
Authored: Thu Mar 16 18:59:10 2017 +0100
Committer: Zoltan Haindrich 
Committed: Thu Mar 16 19:42:01 2017 +0100

--
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |  10 +
 .../generic/GenericUDAFBinarySetFunctions.java  | 464 +++
 .../TestGenericUDAFBinarySetFunctions.java  | 416 +
 .../clientpositive/udaf_binarysetfunctions.q|  57 +++
 .../results/clientpositive/show_functions.q.out |  10 +
 .../udaf_binarysetfunctions.q.out   | 464 +++
 6 files changed, 1421 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0e62d3dc/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 4ac25c2..e3ace2a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -418,6 +418,16 @@ public final class FunctionRegistry {
 system.registerGenericUDAF("covar_pop", new GenericUDAFCovariance());
 system.registerGenericUDAF("covar_samp", new 
GenericUDAFCovarianceSample());
 system.registerGenericUDAF("corr", new GenericUDAFCorrelation());
+system.registerGenericUDAF("regr_slope", new 
GenericUDAFBinarySetFunctions.RegrSlope());
+system.registerGenericUDAF("regr_intercept", new 
GenericUDAFBinarySetFunctions.RegrIntercept());
+system.registerGenericUDAF("regr_r2", new 
GenericUDAFBinarySetFunctions.RegrR2());
+system.registerGenericUDAF("regr_sxx", new 
GenericUDAFBinarySetFunctions.RegrSXX());
+system.registerGenericUDAF("regr_syy", new 
GenericUDAFBinarySetFunctions.RegrSYY());
+system.registerGenericUDAF("regr_sxy", new 
GenericUDAFBinarySetFunctions.RegrSXY());
+system.registerGenericUDAF("regr_avgx", new 
GenericUDAFBinarySetFunctions.RegrAvgX());
+system.registerGenericUDAF("regr_avgy", new 
GenericUDAFBinarySetFunctions.RegrAvgY());
+system.registerGenericUDAF("regr_count", new 
GenericUDAFBinarySetFunctions.RegrCount());
+
 system.registerGenericUDAF("histogram_numeric", new 
GenericUDAFHistogramNumeric());
 system.registerGenericUDAF("percentile_approx", new 
GenericUDAFPercentileApprox());
 system.registerGenericUDAF("collect_set", new GenericUDAFCollectSet());

http://git-wip-us.apache.org/repos/asf/hive/blob/0e62d3dc/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBinarySetFunctions.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBinarySetFunctions.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBinarySetFunctions.java
new file mode 100644
index 000..e799a94
--- /dev/null
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFBinarySetFunctions.java
@@ -0,0 +1,464 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import 
org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage.GenericUDAFAverageEvaluatorDouble;
+import 
org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage.GenericUDAFAverageEva

hive git commit: HIVE-16127: Separate database initialization from actual query run in TestBeeLineDriver (Peter Vary via Zoltan Haindrich reviewed by Vihang Karajgaonkar, Barna Zsombor Klara)

2017-03-13 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 78d860ec5 -> 95796e172


HIVE-16127: Separate database initialization from actual query run in 
TestBeeLineDriver (Peter Vary via Zoltan Haindrich reviewed by Vihang 
Karajgaonkar, Barna Zsombor Klara)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/95796e17
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/95796e17
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/95796e17

Branch: refs/heads/master
Commit: 95796e1727d1799449d9fdf2f10f9db530fa690e
Parents: 78d860e
Author: Peter Vary 
Authored: Mon Mar 13 21:00:54 2017 +0100
Committer: Zoltan Haindrich 
Committed: Mon Mar 13 21:32:12 2017 +0100

--
 .../apache/hive/beeline/util/QFileClient.java   | 382 ---
 .../test/resources/testconfiguration.properties |   3 +-
 .../hive/cli/control/CoreBeeLineDriver.java | 145 ---
 .../org/apache/hive/beeline/qfile/QFile.java| 273 +
 .../hive/beeline/qfile/QFileBeeLineClient.java  | 149 
 .../apache/hive/beeline/qfile/package-info.java |  22 ++
 .../beeline/drop_with_concurrency.q.out |  67 
 7 files changed, 600 insertions(+), 441 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/95796e17/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java 
b/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
deleted file mode 100644
index d99483e..000
--- a/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
+++ /dev/null
@@ -1,382 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hive.beeline.util;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.regex.Pattern;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.util.Shell;
-import org.apache.hive.common.util.StreamPrinter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hive.beeline.BeeLine;
-
-/**
- * QTestClient.
- *
- */
-public class QFileClient {
-  private String username;
-  private String password;
-  private String jdbcUrl;
-  private String jdbcDriver;
-
-  private final File hiveRootDirectory;
-  private File qFileDirectory;
-  private File outputDirectory;
-  private File expectedDirectory;
-  private final File scratchDirectory;
-  private final File warehouseDirectory;
-  private final File initScript;
-  private final File cleanupScript;
-
-  private File testDataDirectory;
-  private File testScriptDirectory;
-
-  private String qFileName;
-  private String testname;
-
-  private File qFile;
-  private File outputFile;
-  private File expectedFile;
-
-  private PrintStream beelineOutputStream;
-
-  private BeeLine beeLine;
-
-  private RegexFilterSet filterSet;
-
-  private boolean hasErrors = false;
-
-  private static final Logger LOG = LoggerFactory
-  .getLogger(QFileClient.class.getName());
-
-
-  public QFileClient(HiveConf hiveConf, String hiveRootDirectory, String 
qFileDirectory, String outputDirectory,
-  String expectedDirectory, String initScript, String cleanupScript) {
-this.hiveRootDirectory = new File(hiveRootDirectory);
-this.qFileDirectory = new File(qFileDirectory);
-this.outputDirectory = new File(outputDirectory);
-this.expectedDirectory = new File(expectedDirectory);
-this.initScript = new File(initScript);
-this.cleanupScript = new File(cleanupScript);
-this.scratchDirectory = new File(hiveConf.getVar(ConfVars.SCRATCHDIR));
-this.warehouseDirectory = new 
File(hiveConf.getVar(ConfVars.METASTOREWAREHOUSE));
-  }
-
-
-  private class RegexF
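
As the commit message says, the point of this change is to run the database initialization once and keep it out of the per-query comparison. A rough sketch of that flow follows; every name in it is hypothetical (the real classes introduced here are QFile and QFileBeeLineClient under org.apache.hive.beeline.qfile).

// Hypothetical sketch of "initialize once, then run each q file"; the class
// and method names below are illustrative, not the real API.
import java.io.File;
import java.util.List;

class BeeLineQTestRunner {
  private final QueryClient client;        // hypothetical BeeLine/JDBC wrapper

  BeeLineQTestRunner(QueryClient client) {
    this.client = client;
  }

  void run(File initScript, List<File> qFiles) throws Exception {
    client.execute(initScript);            // database initialization, done once
    for (File qFile : qFiles) {
      client.execute(qFile);               // actual query run, per test
      // ...compare the produced output against the expected .q.out here
    }
  }

  interface QueryClient {
    void execute(File script) throws Exception;
  }
}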

hive git commit: HIVE-16119: HiveMetaStoreChecker: remove singleThread logic duplication (Zoltan Haindrich reviewed by Vihang Karajgaonkar, Ashutosh Chauhan)

2017-03-09 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 74372256d -> ed2f46aa7


HIVE-16119: HiveMetaStoreChecker: remove singleThread logic duplication (Zoltan 
Haindrich reviewed by Vihang Karajgaonkar, Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ed2f46aa
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ed2f46aa
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ed2f46aa

Branch: refs/heads/master
Commit: ed2f46aa737efb859f23d357fbeafe1b42e7d404
Parents: 7437225
Author: Zoltan Haindrich 
Authored: Thu Mar 9 08:32:35 2017 +0100
Committer: Zoltan Haindrich 
Committed: Thu Mar 9 08:55:43 2017 +0100

--
 .../hive/ql/metadata/HiveMetaStoreChecker.java  | 90 
 .../ql/metadata/TestHiveMetaStoreChecker.java   | 39 +
 2 files changed, 35 insertions(+), 94 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ed2f46aa/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
index 3420ef8..6805c17 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
@@ -28,11 +28,12 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Sets;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.slf4j.Logger;
@@ -52,6 +53,7 @@ import 
org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.thrift.TException;
 
+import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
@@ -411,35 +413,19 @@ public class HiveMetaStoreChecker {
 // pool here the smaller sized pool of the two becomes a bottleneck
 int poolSize = 
conf.getInt(ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT.varname, 15);
 
-// Check if too low config is provided for move files. 2x CPU is 
reasonable max count.
-poolSize = poolSize == 0 ? poolSize : Math.max(poolSize,
-getMinPoolSize());
-
-// Fixed thread pool on need basis
-final ThreadPoolExecutor pool = poolSize > 0 ? (ThreadPoolExecutor)
-Executors.newFixedThreadPool(poolSize,
-new 
ThreadFactoryBuilder().setDaemon(true).setNameFormat("MSCK-GetPaths-%d").build())
 : null;
-
-if (pool == null) {
-  LOG.debug("Not-using threaded version of MSCK-GetPaths");
-  Queue basePaths = new LinkedList<>();
-  basePaths.add(basePath);
-  checkPartitionDirsSingleThreaded(basePaths, allDirs, 
basePath.getFileSystem(conf), maxDepth,
-  maxDepth);
+ExecutorService executor;
+if (poolSize <= 1) {
+  LOG.debug("Using single-threaded version of MSCK-GetPaths");
+  executor = MoreExecutors.sameThreadExecutor();
 } else {
-  LOG.debug("Using multi-threaded version of MSCK-GetPaths with number of 
threads "
-  + pool.getMaximumPoolSize());
-  checkPartitionDirsInParallel((ThreadPoolExecutor) pool, basePath, 
allDirs,
-  basePath.getFileSystem(conf), maxDepth);
+  LOG.debug("Using multi-threaded version of MSCK-GetPaths with number of 
threads " + poolSize);
+  ThreadFactory threadFactory =
+  new 
ThreadFactoryBuilder().setDaemon(true).setNameFormat("MSCK-GetPaths-%d").build();
+  executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(poolSize, 
threadFactory);
 }
-if (pool != null) {
-  pool.shutdown();
-}
-  }
+checkPartitionDirs(executor, basePath, allDirs, 
basePath.getFileSystem(conf), maxDepth);
 
-  @VisibleForTesting
-  int getMinPoolSize() {
-return Runtime.getRuntime().availableProcessors() * 2;
+executor.shutdown();
   }
 
   private final class PathDepthInfoCallable implements Callable {
@@ -515,7 +501,7 @@ public class HiveMetaStoreChecker {
 }
   }
 
-  private void checkPartitionDirsInParallel(final ThreadPoolExecutor pool,
+  private void checkPartitionDirs(final ExecutorService executor,
   final Path basePath, final Set result,
   final FileSystem fs, final int maxDepth) throws HiveException {
 try {
@@ -534,7 +520,7 @@ public class 
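
The duplicated single-threaded code path is replaced by one ExecutorService-based path: a Guava same-thread executor when the configured pool size is 1 or less, and a fixed daemon thread pool otherwise. Below is a minimal standalone sketch of that selection pattern using the same Guava helpers as the patch (newer Guava versions rename the same-thread variant to newDirectExecutorService()).

// Sketch of the executor-selection pattern: one code path, with the
// single-threaded case handled by a same-thread executor.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;

import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class ExecutorSelection {
  public static ExecutorService forPoolSize(int poolSize) {
    if (poolSize <= 1) {
      // runs submitted tasks on the caller's thread; no pool to manage
      return MoreExecutors.sameThreadExecutor();
    }
    ThreadFactory threadFactory =
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("MSCK-GetPaths-%d").build();
    return Executors.newFixedThreadPool(poolSize, threadFactory);
  }
}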

hive git commit: HIVE-16101: QTest failure BeeLine escape_comments after HIVE-16045 (Peter Vary via Zoltan Haindrich)

2017-03-07 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master b123300e2 -> 79c5092a3


HIVE-16101: QTest failure BeeLine escape_comments after HIVE-16045 (Peter Vary 
via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/79c5092a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/79c5092a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/79c5092a

Branch: refs/heads/master
Commit: 79c5092a3e1e42f2710768ee8edb4862eefec9b2
Parents: b123300
Author: Peter Vary 
Authored: Tue Mar 7 09:00:55 2017 +0100
Committer: Zoltan Haindrich 
Committed: Tue Mar 7 09:18:30 2017 +0100

--
 beeline/src/java/org/apache/hive/beeline/util/QFileClient.java | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/79c5092a/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java 
b/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
index d306b7f..d99483e 100644
--- a/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
+++ b/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
@@ -130,6 +130,8 @@ public class QFileClient {
 
 filterSet = new RegexFilterSet()
 .addFilter(logPattern,"")
+.addFilter("going to print operations logs\n","")
+.addFilter("printed operations logs\n","")
 .addFilter("Getting log thread is interrupted, since query is done!\n","")
 .addFilter(scratchDirectory.toString() + "[\\w\\-/]+", 
"!!{hive.exec.scratchdir}!!")
 .addFilter(warehouseDirectory.toString(), 
"!!{hive.metastore.warehouse.dir}!!")



[2/2] hive git commit: HIVE-14459: TestBeeLineDriver - migration and re-enable (Peter Vary via Zoltan Haindrich reviewed by Vihang Karajgaonkar)

2017-03-01 Thread kgyrtkirk
HIVE-14459: TestBeeLineDriver - migration and re-enable (Peter Vary via Zoltan 
Haindrich reviewed by Vihang Karajgaonkar)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ba8de307
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ba8de307
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ba8de307

Branch: refs/heads/master
Commit: ba8de3077673e3a9486c8a9d1625b9640936eedc
Parents: 92aaed2
Author: Peter Vary 
Authored: Wed Mar 1 23:21:34 2017 +0100
Committer: Zoltan Haindrich 
Committed: Wed Mar 1 23:47:35 2017 +0100

--
 .../apache/hive/beeline/util/QFileClient.java   |  72 ++-
 .../hive/jdbc/miniHS2/AbstractHiveService.java  | 159 --
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java   | 556 ---
 .../hive/cli/DisabledTestBeeLineDriver.java |  62 ---
 .../hadoop/hive/cli/TestBeeLineDriver.java  |  62 +++
 .../test/resources/testconfiguration.properties | 158 +-
 itests/util/pom.xml |   5 +
 .../hadoop/hive/cli/control/CliConfigs.java |  14 +-
 .../hive/cli/control/CoreBeeLineDriver.java |  95 ++--
 .../hive/jdbc/miniHS2/AbstractHiveService.java  | 159 ++
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java   | 556 +++
 .../beeline/escape_comments.q.out   | 416 ++
 12 files changed, 1307 insertions(+), 1007 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ba8de307/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java 
b/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
index 81f1b0e..d306b7f 100644
--- a/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
+++ b/beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
@@ -21,12 +21,15 @@ package org.apache.hive.beeline.util;
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.ArrayList;
 import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.regex.Pattern;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.util.Shell;
+import org.apache.hive.common.util.StreamPrinter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -49,6 +52,8 @@ public class QFileClient {
   private File expectedDirectory;
   private final File scratchDirectory;
   private final File warehouseDirectory;
+  private final File initScript;
+  private final File cleanupScript;
 
   private File testDataDirectory;
   private File testScriptDirectory;
@@ -73,11 +78,13 @@ public class QFileClient {
 
 
   public QFileClient(HiveConf hiveConf, String hiveRootDirectory, String 
qFileDirectory, String outputDirectory,
-  String expectedDirectory) {
+  String expectedDirectory, String initScript, String cleanupScript) {
 this.hiveRootDirectory = new File(hiveRootDirectory);
 this.qFileDirectory = new File(qFileDirectory);
 this.outputDirectory = new File(outputDirectory);
 this.expectedDirectory = new File(expectedDirectory);
+this.initScript = new File(initScript);
+this.cleanupScript = new File(cleanupScript);
 this.scratchDirectory = new File(hiveConf.getVar(ConfVars.SCRATCHDIR));
 this.warehouseDirectory = new 
File(hiveConf.getVar(ConfVars.METASTOREWAREHOUSE));
   }
@@ -110,6 +117,9 @@ public class QFileClient {
 String timePattern = "(Mon|Tue|Wed|Thu|Fri|Sat|Sun) "
 + "(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) "
 + "\\d{2} \\d{2}:\\d{2}:\\d{2} \\w+ 20\\d{2}";
+// Pattern to remove the timestamp and other infrastructural info from the 
out file
+String logPattern = 
"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d*\\s+\\S+\\s+\\[" +
+".*\\]\\s+\\S+:\\s+";
 String unixTimePattern = "\\D" + currentTimePrefix + "\\d{6}\\D";
 String unixTimeMillisPattern = "\\D" + currentTimePrefix + "\\d{9}\\D";
 
@@ -119,12 +129,15 @@ public class QFileClient {
   + "|SCR|SEL|STATS|TS|UDTF|UNION)_\\d+\"";
 
 filterSet = new RegexFilterSet()
+.addFilter(logPattern,"")
+.addFilter("Getting log thread is interrupted, since query is done!\n","")
 .addFilter(scratchDirectory.toString() + "[\\w\\-/]+", 
"!!{hive.exec.scratchdir}!!")
 .addFilter(warehouseDirectory.toString(), 
"!!{hive.metastore.warehouse.dir}!!")
 .addFilter(expectedDirectory.toString(), "!!{expectedDirectory}!!")
 .addFilter(outputDirectory.toString(), "!!{outputDirectory}!!")
 .addFilter(qFileDirectory.toString(), "!!{qFileDirectory}!!")
 .addFilter(hiveRootDirectory.toString(

[1/2] hive git commit: HIVE-14459: TestBeeLineDriver - migration and re-enable (Peter Vary via Zoltan Haindrich reviewed by Vihang Karajgaonkar)

2017-03-01 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 92aaed243 -> ba8de3077


http://git-wip-us.apache.org/repos/asf/hive/blob/ba8de307/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
--
diff --git a/ql/src/test/results/clientpositive/beeline/escape_comments.q.out 
b/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
new file mode 100644
index 000..0cbc8d6
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
@@ -0,0 +1,416 @@
+>>>  !run !!{qFileDirectory}!!/escape_comments.q
+>>>  create database escape_comments_db comment 'a\nb';
+Acquired the compile lock.
+Compiling commandqueryId=(!!{queryId}!!): create database escape_comments_db 
comment 'a\nb'
+Semantic Analysis Completed
+Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! 
seconds
+Executing commandqueryId=(!!{queryId}!!): create database escape_comments_db 
comment 'a\nb'
+PREHOOK: query: create database escape_comments_db comment 'a\nb'
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:escape_comments_db
+Starting task [Stage-0:DDL] in serial mode
+POSTHOOK: query: create database escape_comments_db comment 'a\nb'
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:escape_comments_db
+Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! 
seconds
+OK
+Shutting down query create database escape_comments_db comment 'a\nb'
+No rows affected 
+>>>  use escape_comments_db;
+Acquired the compile lock.
+Compiling commandqueryId=(!!{queryId}!!): use escape_comments_db
+Semantic Analysis Completed
+Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! 
seconds
+Executing commandqueryId=(!!{queryId}!!): use escape_comments_db
+PREHOOK: query: use escape_comments_db
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:escape_comments_db
+Starting task [Stage-0:DDL] in serial mode
+POSTHOOK: query: use escape_comments_db
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:escape_comments_db
+Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! 
seconds
+OK
+Shutting down query use escape_comments_db
+No rows affected 
+>>>  create table escape_comments_tbl1 
+(col1 string comment 'a\nb\'\;') comment 'a\nb' 
+partitioned by (p1 string comment 'a\nb');
+Acquired the compile lock.
+Compiling commandqueryId=(!!{queryId}!!): create table escape_comments_tbl1 
+(col1 string comment 'a\nb\'\;') comment 'a\nb' 
+partitioned by (p1 string comment 'a\nb')
+Semantic Analysis Completed
+Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! 
seconds
+Executing commandqueryId=(!!{queryId}!!): create table escape_comments_tbl1 
+(col1 string comment 'a\nb\'\;') comment 'a\nb' 
+partitioned by (p1 string comment 'a\nb')
+PREHOOK: query: create table escape_comments_tbl1 
+(col1 string comment 'a\nb\'\;') comment 'a\nb' 
+partitioned by (p1 string comment 'a\nb')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:escape_comments_db
+PREHOOK: Output: escape_comments_db@escape_comments_tbl1
+Starting task [Stage-0:DDL] in serial mode
+POSTHOOK: query: create table escape_comments_tbl1 
+(col1 string comment 'a\nb\'\;') comment 'a\nb' 
+partitioned by (p1 string comment 'a\nb')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:escape_comments_db
+POSTHOOK: Output: escape_comments_db@escape_comments_tbl1
+Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! 
seconds
+OK
+Shutting down query create table escape_comments_tbl1 
+(col1 string comment 'a\nb\'\;') comment 'a\nb' 
+partitioned by (p1 string comment 'a\nb')
+No rows affected 
+>>>  create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb' 
+as select col1 from escape_comments_tbl1;
+Acquired the compile lock.
+Compiling commandqueryId=(!!{queryId}!!): create view escape_comments_view1 
(col1 comment 'a\nb') comment 'a\nb' 
+as select col1 from escape_comments_tbl1
+Semantic Analysis Completed
+Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:col1, 
type:string, comment:null)], properties:null)
+Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! 
seconds
+Executing commandqueryId=(!!{queryId}!!): create view escape_comments_view1 
(col1 comment 'a\nb') comment 'a\nb' 
+as select col1 from escape_comments_tbl1
+PREHOOK: query: create view escape_comments_view1 (col1 comment 'a\nb') 
comment 'a\nb' 
+as select col1 from escape_comments_tbl1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: escape_comments_db@escape_comments_tbl1
+PREHOOK: Output: database:escape_comments_db
+PREHOOK: Output: escape_comments_db@escape_comments_view1
+Starting task [Stage-1:DDL] in serial mode
+POSTHOO

hive git commit: HIVE-15848: count or sum distinct incorrect when hive.optimize.reducededuplication set to true (Zoltan Haindrich reviewed by Ashutosh Chauhan)

2017-03-01 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 3e94fb22b -> b9ad6dc38


HIVE-15848: count or sum distinct incorrect when 
hive.optimize.reducededuplication set to true (Zoltan Haindrich reviewed by 
Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b9ad6dc3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b9ad6dc3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b9ad6dc3

Branch: refs/heads/master
Commit: b9ad6dc3867efce4dd833e519c788aab280dabd9
Parents: 3e94fb2
Author: Haindrich Zoltán (kirk) 
Authored: Wed Mar 1 22:30:13 2017 +0100
Committer: Haindrich Zoltán (kirk) 
Committed: Wed Mar 1 22:35:49 2017 +0100

--
 .../test/resources/testconfiguration.properties |   1 +
 .../correlation/ReduceSinkDeDuplication.java|   4 +
 .../reduce_deduplicate_distinct.q   |  54 +++
 .../llap/reduce_deduplicate_distinct.q.out  | 483 +++
 4 files changed, 542 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b9ad6dc3/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 807b124..9c6a069 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -413,6 +413,7 @@ minillap.query.files=acid_bucket_pruning.q,\
   llap_udf.q,\
   llapdecider.q,\
   reduce_deduplicate.q,\
+  reduce_deduplicate_distinct.q, \
   remote_script.q,\
   tez_aggr_part_stats.q,\
   tez_union_view.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/b9ad6dc3/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
index d53efbf..2b075be 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
@@ -360,6 +360,10 @@ public class ReduceSinkDeDuplication extends Transform {
   if (moveRSOrderTo == null) {
 return null;
   }
+  // if cRS is being used for distinct - the two reduce sinks are 
incompatible
+  if (cConf.getDistinctColumnIndices().size() >= 2) {
+return null;
+  }
   Integer moveReducerNumTo = checkNumReducer(cConf.getNumReducers(), 
pConf.getNumReducers());
   if (moveReducerNumTo == null ||
   moveReducerNumTo > 0 && cConf.getNumReducers() < minReducer) {

http://git-wip-us.apache.org/repos/asf/hive/blob/b9ad6dc3/ql/src/test/queries/clientpositive/reduce_deduplicate_distinct.q
--
diff --git a/ql/src/test/queries/clientpositive/reduce_deduplicate_distinct.q 
b/ql/src/test/queries/clientpositive/reduce_deduplicate_distinct.q
new file mode 100644
index 000..840025c
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/reduce_deduplicate_distinct.q
@@ -0,0 +1,54 @@
+create table count_distinct_test(id int,key int,name int);
+
+insert into count_distinct_test values (1,1,2),(1,2,3),(1,3,2),(1,4,2),(1,5,3);
+
+-- simple case; no need for opt
+explain select id,count(distinct key),count(distinct name)
+from count_distinct_test
+group by id;
+
+select id,count(distinct key),count(distinct name)
+from count_distinct_test
+group by id;
+
+-- dedup on
+set hive.optimize.reducededuplication=true;
+
+-- candidate1
+explain select id,count(Distinct key),count(Distinct name)
+from (select id,key,name from count_distinct_test group by id,key,name)m
+group by id;
+
+select id,count(Distinct key),count(Distinct name)
+from (select id,key,name from count_distinct_test group by id,key,name)m
+group by id;
+
+-- candidate2
+explain select id,count(Distinct name),count(Distinct key)
+from (select id,key,name from count_distinct_test group by id,name,key)m
+group by id;
+
+select id,count(Distinct name),count(Distinct key)
+from (select id,key,name from count_distinct_test group by id,name,key)m
+group by id;
+
+-- deduplication off
+set hive.optimize.reducededuplication=false;
+
+-- candidate1
+explain select id,count(Distinct key),count(Distinct name)
+from (select id,key,name from count_distinct_test group by id,key,name)m
+group by id;
+
+select id,count(Distinct key),count(Distinct name)
+from (select id,key,name from count_distinct_test group by id,key,name)m
+group by id;
+
+-- candidate2
+explain select id,count(Distinct name),count(Distinc
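
For the five rows inserted by the test (all with id=1) the correct answers are count(distinct key)=5 and count(distinct name)=2, which is what deduplication could break when it merged incompatible ReduceSinks. A tiny standalone check of that arithmetic follows (plain Java, not Hive code).

// Expected result of the test query over the inserted rows:
//   (1,1,2),(1,2,3),(1,3,2),(1,4,2),(1,5,3)
// count(distinct key) = |{1,2,3,4,5}| = 5, count(distinct name) = |{2,3}| = 2
import java.util.HashSet;
import java.util.Set;

public class DistinctCountCheck {
  public static void main(String[] args) {
    int[][] rows = { {1,1,2}, {1,2,3}, {1,3,2}, {1,4,2}, {1,5,3} };  // (id, key, name)
    Set<Integer> keys = new HashSet<>(), names = new HashSet<>();
    for (int[] r : rows) {
      keys.add(r[1]);
      names.add(r[2]);
    }
    System.out.println("count(distinct key)  = " + keys.size());    // 5
    System.out.println("count(distinct name) = " + names.size());   // 2
  }
}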
