hive git commit: HIVE-19509: Disable tests that are failing continuously (addendum)

2018-05-14 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 8f754fc35 -> 116bf0bcb


HIVE-19509: Disable tests that are failing continuously (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/116bf0bc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/116bf0bc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/116bf0bc

Branch: refs/heads/master
Commit: 116bf0bcb75695e0d7de2f70310b9f5a9e60276c
Parents: 8f754fc
Author: Jesus Camacho Rodriguez 
Authored: Mon May 14 22:47:26 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Mon May 14 22:47:26 2018 -0700

--
 .../main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java| 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/116bf0bc/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
--
diff --git 
a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java 
b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
index ac609df..5c66609 100644
--- 
a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
+++ 
b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
@@ -231,6 +231,7 @@ public class CliConfigs {
 excludeQuery("union_fast_stats.q"); // Disabled in HIVE-19509
 excludeQuery("schema_evol_orc_acidvec_part.q"); // Disabled in 
HIVE-19509
 excludeQuery("schema_evol_orc_vec_part_llap_io.q"); // Disabled in 
HIVE-19509
+excludeQuery("tez_dynpart_hashjoin_1.q"); // Disabled in HIVE-19509
 
 setResultsDir("ql/src/test/results/clientpositive/llap");
 setLogDir("itests/qtest/target/qfile-results/clientpositive");



hive git commit: HIVE-19509: Disable tests that are failing continuously (addendum)

2018-05-14 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 76da8043e -> 8f754fc35


HIVE-19509: Disable tests that are failing continuously (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8f754fc3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8f754fc3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8f754fc3

Branch: refs/heads/master
Commit: 8f754fc351daf66846eb2cee658c5259d5008afd
Parents: 76da804
Author: Jesus Camacho Rodriguez 
Authored: Mon May 14 22:18:43 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Mon May 14 22:18:43 2018 -0700

--
 .../org/apache/hadoop/hive/cli/TestMiniDruidKafkaCliDriver.java| 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8f754fc3/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidKafkaCliDriver.java
--
diff --git 
a/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidKafkaCliDriver.java
 
b/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidKafkaCliDriver.java
index 4768975..e2d26ab 100644
--- 
a/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidKafkaCliDriver.java
+++ 
b/itests/qtest/src/test/java/org/apache/hadoop/hive/cli/TestMiniDruidKafkaCliDriver.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hive.cli.control.CliConfigs;
 
 import org.junit.ClassRule;
 import org.junit.Rule;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.rules.TestRule;
 import org.junit.runner.RunWith;
@@ -31,6 +32,7 @@ import org.junit.runners.Parameterized.Parameters;
 import java.io.File;
 import java.util.List;
 
+@Ignore("HIVE-19509: Disable tests that are failing continuously")
 @RunWith(Parameterized.class)
 public class TestMiniDruidKafkaCliDriver {
 



hive git commit: HIVE-19509: Disable tests that are failing continuously (addendum)

2018-05-14 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 1d8362538 -> 76da8043e


HIVE-19509: Disable tests that are failing continuously (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/76da8043
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/76da8043
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/76da8043

Branch: refs/heads/master
Commit: 76da8043e5d64f4fbc0739c4fd4d0212bec3f66f
Parents: 1d83625
Author: Jesus Camacho Rodriguez 
Authored: Mon May 14 21:27:00 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Mon May 14 21:27:00 2018 -0700

--
 .../src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java  | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/76da8043/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
 
b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
index 937c7a9..51e491c 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
@@ -769,6 +769,7 @@ public class TestBeeLineWithArgs {
* Test Beeline could show the query progress for time-consuming query.
* @throws Throwable
*/
+  @Ignore("HIVE-19509: Disable tests that are failing continuously")
   @Test
   public void testQueryProgress() throws Throwable {
 final String SCRIPT_TEXT =



hive git commit: HIVE-19509: Disable tests that are failing continuously (addendum)

2018-05-14 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 11647b85c -> 1d8362538


HIVE-19509: Disable tests that are failing continuously (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1d836253
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1d836253
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1d836253

Branch: refs/heads/master
Commit: 1d83625385d466b18018201fb89e3541f2fc46b6
Parents: 11647b8
Author: Jesus Camacho Rodriguez 
Authored: Mon May 14 16:46:51 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Mon May 14 18:59:35 2018 -0700

--
 .../hive/beeline/TestBeeLineWithArgs.java   |  2 ++
 .../hadoop/hive/cli/control/CliConfigs.java | 22 +++-
 .../hive/cli/control/TestDanglingQOuts.java |  1 +
 .../org/apache/hadoop/hive/ql/TestTxnExIm.java  |  2 ++
 .../llap/tez_vector_dynpart_hashjoin_1.q.out|  4 ++--
 .../metastore/TestNonCatCallsWithCatalog.java   |  2 ++
 .../apache/hadoop/hive/metastore/TestStats.java |  2 ++
 7 files changed, 23 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1d836253/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
 
b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
index 55c6c23..937c7a9 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
@@ -55,6 +55,7 @@ import org.apache.hive.jdbc.miniHS2.MiniHS2.MiniClusterType;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 /**
@@ -795,6 +796,7 @@ public class TestBeeLineWithArgs {
*
* @throws Throwable
*/
+  @Ignore("HIVE-19509: Disable tests that are failing continuously")
   @Test
   public void testQueryProgressParallel() throws Throwable {
 final String SCRIPT_TEXT = "set hive.support.concurrency = false;\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/1d836253/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
--
diff --git 
a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java 
b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
index ffa9b0e..ac609df 100644
--- 
a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
+++ 
b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
@@ -58,7 +58,7 @@ public class CliConfigs {
 excludesFrom(testConfigProps, "druid.query.files");
 excludesFrom(testConfigProps, "druid.kafka.query.files");
 
-excludeQuery("fouter_join_ppr.q");
+excludeQuery("fouter_join_ppr.q"); // Disabled in HIVE-19509
 
 setResultsDir("ql/src/test/results/clientpositive");
 setLogDir("itests/qtest/target/qfile-results/clientpositive");
@@ -198,6 +198,8 @@ public class CliConfigs {
 
 includesFrom(testConfigProps, "druid.kafka.query.files");
 
+excludeQuery("druidkafkamini_basic.q"); // Disabled in HIVE-19509
+
 setResultsDir("ql/src/test/results/clientpositive/druid");
 setLogDir("itests/qtest/target/tmp/log");
 
@@ -222,13 +224,13 @@ public class CliConfigs {
 
 includesFrom(testConfigProps, "minillaplocal.query.files");
 includesFrom(testConfigProps, "minillaplocal.shared.query.files");
-excludeQuery("bucket_map_join_tez1.q");
-excludeQuery("special_character_in_tabnames_1.q");
-excludeQuery("sysdb.q");
-excludeQuery("tez_smb_1.q");
-excludeQuery("union_fast_stats.q");
-excludeQuery("schema_evol_orc_acidvec_part.q");
-excludeQuery("schema_evol_orc_vec_part_llap_io.q");
+excludeQuery("bucket_map_join_tez1.q"); // Disabled in HIVE-19509
+excludeQuery("special_character_in_tabnames_1.q"); // Disabled in 
HIVE-19509
+excludeQuery("sysdb.q"); // Disabled in HIVE-19509
+excludeQuery("tez_smb_1.q"); // Disabled in HIVE-19509
+excludeQuery("union_fast_stats.q"); // Disabled in HIVE-19509
+excludeQuery("schema_evol_orc_acidvec_part.q"); // Disabled in 
HIVE-19509
+excludeQuery("schema_evol_orc_vec_part_llap_io.q"); // Disabled in 
HIVE-19509
 
 setResultsDir("ql/src/test/results/clientpositive/llap");
 setLogDir("itests/qtest/target/qfile-results/clientpositive");
@@ -369,8 +371,8 @@ public class CliConfigs {
 

hive git commit: HIVE-19534: Allow implementations to access member variables of AbstractRecordWriter (Prasanth Jayachandran reviewed by Matt Burgess, Ashutosh Chauhan)

2018-05-14 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/master 14da40097 -> 11647b85c


HIVE-19534: Allow implementations to access member variables of 
AbstractRecordWriter (Prasanth Jayachandran reviewed by Matt Burgess, Ashutosh 
Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/11647b85
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/11647b85
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/11647b85

Branch: refs/heads/master
Commit: 11647b85c0ede0e71b68c2b7f356f5d056a8c964
Parents: 14da400
Author: Prasanth Jayachandran 
Authored: Mon May 14 17:22:21 2018 -0700
Committer: Prasanth Jayachandran 
Committed: Mon May 14 17:22:21 2018 -0700

--
 .../hive/streaming/AbstractRecordWriter.java| 92 ++--
 1 file changed, 46 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/11647b85/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
--
diff --git 
a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java 
b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
index b6c8890..0866850 100644
--- a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
+++ b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
@@ -64,48 +64,48 @@ public abstract class AbstractRecordWriter implements RecordWriter {
 
   private static final String DEFAULT_LINE_DELIMITER_PATTERN = "[\r\n]";
   protected HiveConf conf;
-  private StreamingConnection conn;
+  protected StreamingConnection conn;
   protected Table table;
-  List<String> inputColumns;
-  List<String> inputTypes;
-  private String fullyQualifiedTableName;
-  private Map<String, List<RecordUpdater>> updaters = new HashMap<>();
-  private Map<String, Path> partitionPaths = new HashMap<>();
-  private Set<String> addedPartitions = new HashSet<>();
+  protected List<String> inputColumns;
+  protected List<String> inputTypes;
+  protected String fullyQualifiedTableName;
+  protected Map<String, List<RecordUpdater>> updaters = new HashMap<>();
+  protected Map<String, Path> partitionPaths = new HashMap<>();
+  protected Set<String> addedPartitions = new HashSet<>();
   // input OI includes table columns + partition columns
-  private StructObjectInspector inputRowObjectInspector;
+  protected StructObjectInspector inputRowObjectInspector;
   // output OI strips off the partition columns and retains other columns
-  private ObjectInspector outputRowObjectInspector;
-  private List<String> partitionColumns = new ArrayList<>();
-  private ObjectInspector[] partitionObjInspectors = null;
-  private StructField[] partitionStructFields = null;
-  private Object[] partitionFieldData;
-  private ObjectInspector[] bucketObjInspectors = null;
-  private StructField[] bucketStructFields = null;
-  private Object[] bucketFieldData;
-  private List<Integer> bucketIds = new ArrayList<>();
-  private int totalBuckets;
-  private String defaultPartitionName;
-  private boolean isBucketed;
-  private AcidOutputFormat<?, ?> acidOutputFormat;
-  private Long curBatchMinWriteId;
-  private Long curBatchMaxWriteId;
-  private final String lineDelimiter;
-  private HeapMemoryMonitor heapMemoryMonitor;
+  protected ObjectInspector outputRowObjectInspector;
+  protected List<String> partitionColumns = new ArrayList<>();
+  protected ObjectInspector[] partitionObjInspectors = null;
+  protected StructField[] partitionStructFields = null;
+  protected Object[] partitionFieldData;
+  protected ObjectInspector[] bucketObjInspectors = null;
+  protected StructField[] bucketStructFields = null;
+  protected Object[] bucketFieldData;
+  protected List<Integer> bucketIds = new ArrayList<>();
+  protected int totalBuckets;
+  protected String defaultPartitionName;
+  protected boolean isBucketed;
+  protected AcidOutputFormat<?, ?> acidOutputFormat;
+  protected Long curBatchMinWriteId;
+  protected Long curBatchMaxWriteId;
+  protected final String lineDelimiter;
+  protected HeapMemoryMonitor heapMemoryMonitor;
   // if low memory canary is set and if records after set canary exceeds threshold, trigger a flush.
   // This is to avoid getting notified of low memory too often and flushing too often.
-  private AtomicBoolean lowMemoryCanary;
-  private long ingestSizeBytes = 0;
-  private boolean autoFlush;
-  private float memoryUsageThreshold;
-  private long ingestSizeThreshold;
+  protected AtomicBoolean lowMemoryCanary;
+  protected long ingestSizeBytes = 0;
+  protected boolean autoFlush;
+  protected float memoryUsageThreshold;
+  protected long ingestSizeThreshold;
 
   public AbstractRecordWriter(final String lineDelimiter) {
     this.lineDelimiter = lineDelimiter == null || lineDelimiter.isEmpty() ?
         DEFAULT_LINE_DELIMITER_PATTERN : lineDelimiter;
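
The net effect of the commit is that writer implementations can now inspect and reuse this state directly. A minimal sketch of what that enables, assuming only the field names and constructor shown in the diff (the subclass itself is hypothetical, and it stays abstract so the parent's abstract methods need not be implemented here):

    import org.apache.hive.streaming.AbstractRecordWriter;

    // Hypothetical subclass enabled by the visibility change above.
    public abstract class InspectableRecordWriter extends AbstractRecordWriter {

      public InspectableRecordWriter(String lineDelimiter) {
        super(lineDelimiter);
      }

      // These fields were private before HIVE-19534; a subclass can now read them.
      protected String describeState() {
        return "table=" + fullyQualifiedTableName
            + ", buckets=" + totalBuckets
            + ", partitionsAdded=" + addedPartitions.size()
            + ", bytesIngested=" + ingestSizeBytes;
      }
    }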
   

hive git commit: HIVE-19534: Allow implementations to access member variables of AbstractRecordWriter (Prasanth Jayachandran reviewed by Matt Burgess, Ashutosh Chauhan)

2018-05-14 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/branch-3 66f6748a0 -> 3ea0356f7


HIVE-19534: Allow implementations to access member variables of 
AbstractRecordWriter (Prasanth Jayachandran reviewed by Matt Burgess, Ashutosh 
Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3ea0356f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3ea0356f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3ea0356f

Branch: refs/heads/branch-3
Commit: 3ea0356f7dd9fc4d3406806d80c349187afd9d64
Parents: 66f6748
Author: Prasanth Jayachandran 
Authored: Mon May 14 17:19:34 2018 -0700
Committer: Prasanth Jayachandran 
Committed: Mon May 14 17:19:34 2018 -0700

--
 .../hive/streaming/AbstractRecordWriter.java| 92 ++--
 1 file changed, 46 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3ea0356f/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
--
diff --git 
a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java 
b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
index b6c8890..0866850 100644
--- a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
+++ b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
@@ -64,48 +64,48 @@ public abstract class AbstractRecordWriter implements RecordWriter {
 
   private static final String DEFAULT_LINE_DELIMITER_PATTERN = "[\r\n]";
   protected HiveConf conf;
-  private StreamingConnection conn;
+  protected StreamingConnection conn;
   protected Table table;
-  List<String> inputColumns;
-  List<String> inputTypes;
-  private String fullyQualifiedTableName;
-  private Map<String, List<RecordUpdater>> updaters = new HashMap<>();
-  private Map<String, Path> partitionPaths = new HashMap<>();
-  private Set<String> addedPartitions = new HashSet<>();
+  protected List<String> inputColumns;
+  protected List<String> inputTypes;
+  protected String fullyQualifiedTableName;
+  protected Map<String, List<RecordUpdater>> updaters = new HashMap<>();
+  protected Map<String, Path> partitionPaths = new HashMap<>();
+  protected Set<String> addedPartitions = new HashSet<>();
   // input OI includes table columns + partition columns
-  private StructObjectInspector inputRowObjectInspector;
+  protected StructObjectInspector inputRowObjectInspector;
   // output OI strips off the partition columns and retains other columns
-  private ObjectInspector outputRowObjectInspector;
-  private List<String> partitionColumns = new ArrayList<>();
-  private ObjectInspector[] partitionObjInspectors = null;
-  private StructField[] partitionStructFields = null;
-  private Object[] partitionFieldData;
-  private ObjectInspector[] bucketObjInspectors = null;
-  private StructField[] bucketStructFields = null;
-  private Object[] bucketFieldData;
-  private List<Integer> bucketIds = new ArrayList<>();
-  private int totalBuckets;
-  private String defaultPartitionName;
-  private boolean isBucketed;
-  private AcidOutputFormat<?, ?> acidOutputFormat;
-  private Long curBatchMinWriteId;
-  private Long curBatchMaxWriteId;
-  private final String lineDelimiter;
-  private HeapMemoryMonitor heapMemoryMonitor;
+  protected ObjectInspector outputRowObjectInspector;
+  protected List<String> partitionColumns = new ArrayList<>();
+  protected ObjectInspector[] partitionObjInspectors = null;
+  protected StructField[] partitionStructFields = null;
+  protected Object[] partitionFieldData;
+  protected ObjectInspector[] bucketObjInspectors = null;
+  protected StructField[] bucketStructFields = null;
+  protected Object[] bucketFieldData;
+  protected List<Integer> bucketIds = new ArrayList<>();
+  protected int totalBuckets;
+  protected String defaultPartitionName;
+  protected boolean isBucketed;
+  protected AcidOutputFormat<?, ?> acidOutputFormat;
+  protected Long curBatchMinWriteId;
+  protected Long curBatchMaxWriteId;
+  protected final String lineDelimiter;
+  protected HeapMemoryMonitor heapMemoryMonitor;
   // if low memory canary is set and if records after set canary exceeds threshold, trigger a flush.
   // This is to avoid getting notified of low memory too often and flushing too often.
-  private AtomicBoolean lowMemoryCanary;
-  private long ingestSizeBytes = 0;
-  private boolean autoFlush;
-  private float memoryUsageThreshold;
-  private long ingestSizeThreshold;
+  protected AtomicBoolean lowMemoryCanary;
+  protected long ingestSizeBytes = 0;
+  protected boolean autoFlush;
+  protected float memoryUsageThreshold;
+  protected long ingestSizeThreshold;
 
   public AbstractRecordWriter(final String lineDelimiter) {
     this.lineDelimiter = lineDelimiter == null || lineDelimiter.isEmpty() ?
         DEFAULT_LINE_DELIMITER_PATTERN : lineDelimiter;

hive git commit: HIVE-19494: Accept shade prefix during reflective instantiation of output format (Prasanth Jayachandran reviewed by Sergey Shelukhin)

2018-05-14 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/master 52be32b00 -> 14da40097


HIVE-19494: Accept shade prefix during reflective instantiation of output 
format (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/14da4009
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/14da4009
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/14da4009

Branch: refs/heads/master
Commit: 14da400978c989673e50633a33f7262cf9509c72
Parents: 52be32b
Author: Prasanth Jayachandran 
Authored: Mon May 14 16:10:44 2018 -0700
Committer: Prasanth Jayachandran 
Committed: Mon May 14 16:10:44 2018 -0700

--
 .../apache/hadoop/hive/common/JavaUtils.java|  4 ++
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  3 ++
 .../hive/streaming/AbstractRecordWriter.java| 50 
 3 files changed, 37 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/14da4009/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java 
b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
index e09dec1..c011cd1 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
@@ -71,6 +71,10 @@ public final class JavaUtils {
     return classLoader;
   }
 
+  public static Class<?> loadClass(String shadePrefix, String className) throws ClassNotFoundException {
+    return loadClass(shadePrefix + "." + className);
+  }
+
   public static Class<?> loadClass(String className) throws ClassNotFoundException {
     return loadClass(className, true);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/14da4009/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 9df9cca..0a997a1 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1911,6 +1911,9 @@ public class HiveConf extends Configuration {
   "Hive streaming ingest has auto flush mechanism to flush all open record 
updaters under memory pressure.\n" +
 "When memory usage exceed 
hive.heap.memory.monitor.default.usage.threshold, the auto-flush mechanism will 
\n" +
 "wait until this size (default 100Mb) of records are ingested before 
triggering flush."),
+HIVE_CLASSLOADER_SHADE_PREFIX("hive.classloader.shade.prefix", "", "During 
reflective instantiation of a class\n" +
+  "(input, output formats, serde etc.), when classloader throws 
ClassNotFoundException, as a fallback this\n" +
+  "shade prefix will be used before class reference and retried."),
 
 
HIVE_ORC_MS_FOOTER_CACHE_ENABLED("hive.orc.splits.ms.footer.cache.enabled", 
false,
 "Whether to enable using file metadata cache in metastore for ORC file 
footers."),

http://git-wip-us.apache.org/repos/asf/hive/blob/14da4009/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
--
diff --git 
a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java 
b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
index 685e0cc..b6c8890 100644
--- a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
+++ b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
@@ -128,31 +128,41 @@ public abstract class AbstractRecordWriter implements RecordWriter {
     if (conn == null) {
       throw new StreamingException("Streaming connection cannot be null during record writer initialization");
     }
+    this.conn = conn;
+    this.curBatchMinWriteId = minWriteId;
+    this.curBatchMaxWriteId = maxWriteId;
+    this.conf = conn.getHiveConf();
+    this.defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
+    this.table = conn.getTable();
+    this.inputColumns = table.getSd().getCols().stream().map(FieldSchema::getName).collect(Collectors.toList());
+    this.inputTypes = table.getSd().getCols().stream().map(FieldSchema::getType).collect(Collectors.toList());
+    if (conn.isPartitionedTable() && conn.isDynamicPartitioning()) {
+      this.partitionColumns = table.getPartitionKeys().stream().map(FieldSchema::getName)
+        .collect(Collectors.toList());
+      this.inputColumns.addAll(partitionColumns);
+      this.inputTypes
+
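
Taken together, the new config and the JavaUtils overload describe a retry: load the class by its plain name first, and only on ClassNotFoundException retry with the shade prefix prepended. The commit's actual call site in AbstractRecordWriter is truncated above, so here is a hedged sketch of the fallback using just the overload this commit adds (the wrapper class and method names are illustrative):

    import org.apache.hadoop.hive.common.JavaUtils;

    final class ShadedClassLoadingSketch {

      // Try the plain name first; fall back to "<shadePrefix>.<className>".
      static Class<?> loadWithShadeFallback(String className, String shadePrefix)
          throws ClassNotFoundException {
        try {
          return JavaUtils.loadClass(className);
        } catch (ClassNotFoundException original) {
          if (shadePrefix != null && !shadePrefix.trim().isEmpty()) {
            return JavaUtils.loadClass(shadePrefix, className);
          }
          throw original;
        }
      }
    }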

hive git commit: HIVE-19494: Accept shade prefix during reflective instantiation of output format (Prasanth Jayachandran reviewed by Sergey Shelukhin)

2018-05-14 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/branch-3 c740e32fc -> 66f6748a0


HIVE-19494: Accept shade prefix during reflective instantiation of output 
format (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/66f6748a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/66f6748a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/66f6748a

Branch: refs/heads/branch-3
Commit: 66f6748a069aca27532b5e99721b6230145886db
Parents: c740e32
Author: Prasanth Jayachandran 
Authored: Mon May 14 16:11:18 2018 -0700
Committer: Prasanth Jayachandran 
Committed: Mon May 14 16:11:18 2018 -0700

--
 .../apache/hadoop/hive/common/JavaUtils.java|  4 ++
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  3 ++
 .../hive/streaming/AbstractRecordWriter.java| 50 
 3 files changed, 37 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/66f6748a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java 
b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
index e09dec1..c011cd1 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
@@ -71,6 +71,10 @@ public final class JavaUtils {
     return classLoader;
   }
 
+  public static Class<?> loadClass(String shadePrefix, String className) throws ClassNotFoundException {
+    return loadClass(shadePrefix + "." + className);
+  }
+
   public static Class<?> loadClass(String className) throws ClassNotFoundException {
     return loadClass(className, true);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/66f6748a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index e56c14f..b81c47d 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1911,6 +1911,9 @@ public class HiveConf extends Configuration {
   "Hive streaming ingest has auto flush mechanism to flush all open record 
updaters under memory pressure.\n" +
 "When memory usage exceed 
hive.heap.memory.monitor.default.usage.threshold, the auto-flush mechanism will 
\n" +
 "wait until this size (default 100Mb) of records are ingested before 
triggering flush."),
+HIVE_CLASSLOADER_SHADE_PREFIX("hive.classloader.shade.prefix", "", "During 
reflective instantiation of a class\n" +
+  "(input, output formats, serde etc.), when classloader throws 
ClassNotFoundException, as a fallback this\n" +
+  "shade prefix will be used before class reference and retried."),
 
 
HIVE_ORC_MS_FOOTER_CACHE_ENABLED("hive.orc.splits.ms.footer.cache.enabled", 
false,
 "Whether to enable using file metadata cache in metastore for ORC file 
footers."),

http://git-wip-us.apache.org/repos/asf/hive/blob/66f6748a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
--
diff --git 
a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java 
b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
index 685e0cc..b6c8890 100644
--- a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
+++ b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
@@ -128,31 +128,41 @@ public abstract class AbstractRecordWriter implements RecordWriter {
     if (conn == null) {
       throw new StreamingException("Streaming connection cannot be null during record writer initialization");
     }
+    this.conn = conn;
+    this.curBatchMinWriteId = minWriteId;
+    this.curBatchMaxWriteId = maxWriteId;
+    this.conf = conn.getHiveConf();
+    this.defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
+    this.table = conn.getTable();
+    this.inputColumns = table.getSd().getCols().stream().map(FieldSchema::getName).collect(Collectors.toList());
+    this.inputTypes = table.getSd().getCols().stream().map(FieldSchema::getType).collect(Collectors.toList());
+    if (conn.isPartitionedTable() && conn.isDynamicPartitioning()) {
+      this.partitionColumns = table.getPartitionKeys().stream().map(FieldSchema::getName)
+        .collect(Collectors.toList());
+      this.inputColumns.addAll(partitionColumns);
+      this.inputTypes
+

hive git commit: HIVE-19497: SessionHiveMetaStoreClient.getTable should respect default catalog (Dongjoon Hyun, reviewed by Alan Gates)

2018-05-14 Thread vgarg
Repository: hive
Updated Branches:
  refs/heads/master d65d2334f -> 52be32b00


HIVE-19497: SessionHiveMetaStoreClient.getTable should respect default catalog 
(Dongjoon Hyun, reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/52be32b0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/52be32b0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/52be32b0

Branch: refs/heads/master
Commit: 52be32b004ed488e702bb358a44f9a1d0f147f95
Parents: d65d233
Author: Dongjoon Hyun 
Authored: Mon May 14 15:17:15 2018 -0700
Committer: Vineet Garg 
Committed: Mon May 14 15:18:39 2018 -0700

--
 .../apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/52be32b0/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 071756d..0d2ed54 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -166,7 +166,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
       return deepCopy(table);  // Original method used deepCopy(), do the same here.
     }
     // Try underlying client
-    return super.getTable(DEFAULT_CATALOG_NAME, dbname, name);
+    return super.getTable(MetaStoreUtils.getDefaultCatalog(conf), dbname, name);
   }
 
   // Need to override this one too or dropTable breaks because it doesn't find the table when checks
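
The one-line fix replaces a compile-time constant with a config lookup, so a deployment that configures a non-default catalog resolves table lookups against the right catalog. A rough sketch of the difference, assuming the catalog is read from the metastore.catalog.default property with "hive" as the built-in fallback (the key and fallback here are assumptions, not taken from this diff):

    import org.apache.hadoop.conf.Configuration;

    final class DefaultCatalogSketch {

      // Before: always the constant catalog, regardless of configuration.
      static String before() {
        return "hive"; // what DEFAULT_CATALOG_NAME pins the lookup to
      }

      // After: honor whatever catalog the configuration names.
      static String after(Configuration conf) {
        return conf.get("metastore.catalog.default", "hive");
      }
    }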



hive git commit: HIVE-19497: SessionHiveMetaStoreClient.getTable should respect default catalog (Dongjoon Hyun, reviewed by Alan Gates)

2018-05-14 Thread vgarg
Repository: hive
Updated Branches:
  refs/heads/branch-3 3e04cc117 -> c740e32fc


HIVE-19497: SessionHiveMetaStoreClient.getTable should respect default catalog 
(Dongjoon Hyun, reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c740e32f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c740e32f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c740e32f

Branch: refs/heads/branch-3
Commit: c740e32fcf46f7eba5200419baa68f624cfa3abe
Parents: 3e04cc1
Author: Dongjoon Hyun 
Authored: Mon May 14 15:17:15 2018 -0700
Committer: Vineet Garg 
Committed: Mon May 14 15:19:05 2018 -0700

--
 .../apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/c740e32f/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 071756d..0d2ed54 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -166,7 +166,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
       return deepCopy(table);  // Original method used deepCopy(), do the same here.
     }
     // Try underlying client
-    return super.getTable(DEFAULT_CATALOG_NAME, dbname, name);
+    return super.getTable(MetaStoreUtils.getDefaultCatalog(conf), dbname, name);
   }
 
   // Need to override this one too or dropTable breaks because it doesn't find the table when checks



hive git commit: HIVE-19509: Disable tests that are failing continuously (addendum)

2018-05-14 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master b42636850 -> d65d2334f


HIVE-19509: Disable tests that are failing continuously (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d65d2334
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d65d2334
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d65d2334

Branch: refs/heads/master
Commit: d65d2334f737620f82f637610d5f7379e4a55f04
Parents: b426368
Author: Jesus Camacho Rodriguez 
Authored: Mon May 14 15:10:35 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Mon May 14 15:10:35 2018 -0700

--
 ql/src/test/queries/clientpositive/druidkafkamini_basic.q  | 1 -
 .../results/clientpositive/druid/druidkafkamini_basic.q.out| 6 +-
 2 files changed, 1 insertion(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d65d2334/ql/src/test/queries/clientpositive/druidkafkamini_basic.q
--
diff --git a/ql/src/test/queries/clientpositive/druidkafkamini_basic.q 
b/ql/src/test/queries/clientpositive/druidkafkamini_basic.q
index 4c30cdd..229a20c 100644
--- a/ql/src/test/queries/clientpositive/druidkafkamini_basic.q
+++ b/ql/src/test/queries/clientpositive/druidkafkamini_basic.q
@@ -72,4 +72,3 @@ FROM druid_kafka_test) b
 ) order by b.`user`;
 
 DROP TABLE druid_kafka_test;
-DROP TABLE druid_table_1;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/d65d2334/ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out 
b/ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out
index c2cc249..2e6d768 100644
--- a/ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out
@@ -71,7 +71,7 @@ kafkaPartitions=1
 activeTasks=[]  
 publishingTasks=[]  
 latestOffsets={0=10}
-minimumLag={}   
+minimumLag={0=0}
 aggregateLag=0  
  A masked pattern was here 
 PREHOOK: query: Select count(*) FROM druid_kafka_test
@@ -501,7 +501,3 @@ POSTHOOK: query: DROP TABLE druid_kafka_test
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@druid_kafka_test
 POSTHOOK: Output: default@druid_kafka_test
-PREHOOK: query: DROP TABLE druid_table_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE druid_table_1
-POSTHOOK: type: DROPTABLE



hive git commit: HIVE-19474 : Decimal type should be casted as part of the CTAS or INSERT Clause (addendum)

2018-05-14 Thread vgarg
Repository: hive
Updated Branches:
  refs/heads/branch-3 a9105ced0 -> 3e04cc117


HIVE-19474 : Decimal type should be casted as part of the CTAS or INSERT Clause 
(addendum)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3e04cc11
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3e04cc11
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3e04cc11

Branch: refs/heads/branch-3
Commit: 3e04cc117e90c3e778d59cf96cccb1b4bbc6435f
Parents: a9105ce
Author: Jesus Camacho Rodriguez 
Authored: Mon May 14 14:44:51 2018 -0700
Committer: Vineet Garg 
Committed: Mon May 14 14:54:23 2018 -0700

--
 .../hadoop/hive/druid/serde/TestDruidSerDe.java | 24 
 1 file changed, 10 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3e04cc11/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java
--
diff --git 
a/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java 
b/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java
index e4fa1a2..e45de0f 100644
--- 
a/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java
+++ 
b/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java
@@ -764,8 +764,8 @@ public class TestDruidSerDe {
   }
 
 
-  private static final String COLUMN_NAMES = "__time,c0,c1,c2,c3,c4,c5,c6,c7,c8,c9";
-  private static final String COLUMN_TYPES = "timestamp with local time zone,string,char(6),varchar(8),double,float,decimal(38,18),bigint,int,smallint,tinyint";
+  private static final String COLUMN_NAMES = "__time,c0,c1,c2,c3,c4,c5,c6,c7,c8";
+  private static final String COLUMN_TYPES = "timestamp with local time zone,string,char(6),varchar(8),double,float,bigint,int,smallint,tinyint";
   private static final Object[] ROW_OBJECT = new Object[] {
       new TimestampLocalTZWritable(new TimestampTZ(Instant.ofEpochMilli(137790720L).atZone(ZoneOffset.UTC))),
   new Text("dim1_val"),
@@ -773,7 +773,6 @@ public class TestDruidSerDe {
   new HiveVarcharWritable(new HiveVarchar("dim3_val", 8)),
   new DoubleWritable(10669.3D),
   new FloatWritable(10669.45F),
-  new HiveDecimalWritable(HiveDecimal.create(1064.34D)),
   new LongWritable(1113939),
   new IntWritable(1112123),
   new ShortWritable((short) 12),
@@ -788,11 +787,10 @@ public class TestDruidSerDe {
   .put("c2", "dim3_val")
   .put("c3", 10669.3D)
   .put("c4", 10669.45F)
-  .put("c5", 1064.34D)
-  .put("c6", 1113939L)
-  .put("c7", 1112123)
-  .put("c8", (short) 12)
-  .put("c9", (byte) 0)
+  .put("c5", 1113939L)
+  .put("c6", 1112123)
+  .put("c7", (short) 12)
+  .put("c8", (byte) 0)
   .put("__time_granularity", 137790720L)
   .build());
 
@@ -877,7 +875,6 @@ public class TestDruidSerDe {
   new HiveVarcharWritable(new HiveVarchar("dim3_val", 8)),
   new DoubleWritable(10669.3D),
   new FloatWritable(10669.45F),
-  new HiveDecimalWritable(HiveDecimal.create(1064.34D)),
   new LongWritable(1113939),
   new IntWritable(1112123),
   new ShortWritable((short) 12),
@@ -891,11 +888,10 @@ public class TestDruidSerDe {
   .put("c2", "dim3_val")
   .put("c3", 10669.3D)
   .put("c4", 10669.45F)
-  .put("c5", 1064.34D)
-  .put("c6", 1113939L)
-  .put("c7", 1112123)
-  .put("c8", (short) 12)
-  .put("c9", (byte) 0)
+  .put("c5", 1113939L)
+  .put("c6", 1112123)
+  .put("c7", (short) 12)
+  .put("c8", (byte) 0)
   .build());
 
   @Test



hive git commit: HIVE-19474 : Decimal type should be casted as part of the CTAS or INSERT Clause (addendum)

2018-05-14 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 042eb3988 -> b42636850


HIVE-19474 : Decimal type should be casted as part of the CTAS or INSERT Clause 
(addendum)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b4263685
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b4263685
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b4263685

Branch: refs/heads/master
Commit: b42636850a284268b5271b5d54c964050807928f
Parents: 042eb39
Author: Jesus Camacho Rodriguez 
Authored: Mon May 14 14:44:51 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Mon May 14 14:44:51 2018 -0700

--
 .../hadoop/hive/druid/serde/TestDruidSerDe.java | 24 
 1 file changed, 10 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b4263685/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java
--
diff --git 
a/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java 
b/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java
index e4fa1a2..e45de0f 100644
--- 
a/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java
+++ 
b/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java
@@ -764,8 +764,8 @@ public class TestDruidSerDe {
   }
 
 
-  private static final String COLUMN_NAMES = "__time,c0,c1,c2,c3,c4,c5,c6,c7,c8,c9";
-  private static final String COLUMN_TYPES = "timestamp with local time zone,string,char(6),varchar(8),double,float,decimal(38,18),bigint,int,smallint,tinyint";
+  private static final String COLUMN_NAMES = "__time,c0,c1,c2,c3,c4,c5,c6,c7,c8";
+  private static final String COLUMN_TYPES = "timestamp with local time zone,string,char(6),varchar(8),double,float,bigint,int,smallint,tinyint";
   private static final Object[] ROW_OBJECT = new Object[] {
       new TimestampLocalTZWritable(new TimestampTZ(Instant.ofEpochMilli(137790720L).atZone(ZoneOffset.UTC))),
   new Text("dim1_val"),
@@ -773,7 +773,6 @@ public class TestDruidSerDe {
   new HiveVarcharWritable(new HiveVarchar("dim3_val", 8)),
   new DoubleWritable(10669.3D),
   new FloatWritable(10669.45F),
-  new HiveDecimalWritable(HiveDecimal.create(1064.34D)),
   new LongWritable(1113939),
   new IntWritable(1112123),
   new ShortWritable((short) 12),
@@ -788,11 +787,10 @@ public class TestDruidSerDe {
   .put("c2", "dim3_val")
   .put("c3", 10669.3D)
   .put("c4", 10669.45F)
-  .put("c5", 1064.34D)
-  .put("c6", 1113939L)
-  .put("c7", 1112123)
-  .put("c8", (short) 12)
-  .put("c9", (byte) 0)
+  .put("c5", 1113939L)
+  .put("c6", 1112123)
+  .put("c7", (short) 12)
+  .put("c8", (byte) 0)
   .put("__time_granularity", 137790720L)
   .build());
 
@@ -877,7 +875,6 @@ public class TestDruidSerDe {
   new HiveVarcharWritable(new HiveVarchar("dim3_val", 8)),
   new DoubleWritable(10669.3D),
   new FloatWritable(10669.45F),
-  new HiveDecimalWritable(HiveDecimal.create(1064.34D)),
   new LongWritable(1113939),
   new IntWritable(1112123),
   new ShortWritable((short) 12),
@@ -891,11 +888,10 @@ public class TestDruidSerDe {
   .put("c2", "dim3_val")
   .put("c3", 10669.3D)
   .put("c4", 10669.45F)
-  .put("c5", 1064.34D)
-  .put("c6", 1113939L)
-  .put("c7", 1112123)
-  .put("c8", (short) 12)
-  .put("c9", (byte) 0)
+  .put("c5", 1113939L)
+  .put("c6", 1112123)
+  .put("c7", (short) 12)
+  .put("c8", (byte) 0)
   .build());
 
   @Test



hive git commit: HIVE-19509: Disable tests that are failing continuously (Jesus Camacho Rodriguez) (addendum)

2018-05-14 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master b96487071 -> 042eb3988


HIVE-19509: Disable tests that are failing continuously (Jesus Camacho 
Rodriguez) (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/042eb398
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/042eb398
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/042eb398

Branch: refs/heads/master
Commit: 042eb39884dbcc4acb8af38dfdbd392c408527a8
Parents: b964870
Author: Jesus Camacho Rodriguez 
Authored: Mon May 14 14:25:58 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Mon May 14 14:25:58 2018 -0700

--
 .../src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java   | 2 ++
 itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java | 1 +
 .../main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java | 4 
 .../java/org/apache/hadoop/hive/metastore/TestOldSchema.java | 2 ++
 .../test/java/org/apache/hadoop/hive/metastore/TestStats.java| 2 ++
 5 files changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/042eb398/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
index ac28e43..0af91bd 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
@@ -185,6 +185,7 @@ public class TestAcidOnTez {
    * Tests non acid to acid conversion where starting table has non-standard layout, i.e.
    * where "original" files are not immediate children of the partition dir
    */
+  @Ignore("HIVE-19509: Disable tests that are failing continuously")
   @Test
   public void testNonStandardConversion01() throws Exception {
     HiveConf confForTez = new HiveConf(hiveConf); // make a clone of existing hive conf
@@ -431,6 +432,7 @@ public class TestAcidOnTez {
    * {@link org.apache.hadoop.hive.ql.metadata.Hive#moveAcidFiles(FileSystem, FileStatus[], Path, List)} drops the union subdirs
    * since each delta file has a unique name.
    */
+  @Ignore("HIVE-19509: Disable tests that are failing continuously")
   @Test
   public void testCtasTezUnion() throws Exception {
     HiveConf confForTez = new HiveConf(hiveConf); // make a clone of existing hive conf

http://git-wip-us.apache.org/repos/asf/hive/blob/042eb398/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
index 8aefef8..714fba2 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
@@ -350,6 +350,7 @@ public class TestSSL {
    * Start HS2 in Http mode with SSL enabled, open a SSL connection and fetch data
    * @throws Exception
    */
+  @Ignore("HIVE-19509: Disable tests that are failing continuously")
   @Test
   public void testSSLFetchHttp() throws Exception {
     SSLTestUtils.setSslConfOverlay(confOverlay);

http://git-wip-us.apache.org/repos/asf/hive/blob/042eb398/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
--
diff --git 
a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java 
b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
index 1814f0f..ffa9b0e 100644
--- 
a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
+++ 
b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
@@ -58,6 +58,8 @@ public class CliConfigs {
 excludesFrom(testConfigProps, "druid.query.files");
 excludesFrom(testConfigProps, "druid.kafka.query.files");
 
+excludeQuery("fouter_join_ppr.q");
+
 setResultsDir("ql/src/test/results/clientpositive");
 setLogDir("itests/qtest/target/qfile-results/clientpositive");
 
@@ -225,6 +227,8 @@ public class CliConfigs {
 excludeQuery("sysdb.q");
 excludeQuery("tez_smb_1.q");
 excludeQuery("union_fast_stats.q");
+excludeQuery("schema_evol_orc_acidvec_part.q");
+excludeQuery("schema_evol_orc_vec_part_llap_io.q");
 
 setResultsDir("ql/src/test/results/clientpositive/llap");
 setLogDir("itests/qtest/target/qfile-results/clientpositive");


hive git commit: HIVE-19381: Function replication in cloud fail when download resource from AWS (Daniel Dai, reviewed by Thejas Nair)

2018-05-14 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/branch-3 6936c9c2a -> a9105ced0


HIVE-19381: Function replication in cloud fail when download resource from AWS 
(Daniel Dai, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a9105ced
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a9105ced
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a9105ced

Branch: refs/heads/branch-3
Commit: a9105ced0c7d6684e590f536f2299435076d29c9
Parents: 6936c9c
Author: Daniel Dai 
Authored: Mon May 14 13:25:58 2018 -0700
Committer: Daniel Dai 
Committed: Mon May 14 13:25:58 2018 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java   | 6 ++
 .../java/org/apache/hadoop/hive/ql/session/SessionState.java   | 5 -
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a9105ced/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java
index a0a90a9..b9d6f58 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java
@@ -30,6 +30,7 @@ import com.google.common.collect.Multimap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.ResourceType;
@@ -165,7 +166,9 @@ public class FunctionTask extends Task {
 checkLocalFunctionResources(db, createFunctionDesc.getResources());
 
 FunctionInfo registered = null;
+HiveConf oldConf = SessionState.get().getConf();
 try {
+  SessionState.get().setConf(conf);
   registered = FunctionRegistry.registerPermanentFunction(
 registeredName, className, true, toFunctionResource(resources));
 } catch (RuntimeException ex) {
@@ -173,7 +176,10 @@ public class FunctionTask extends Task {
   while (t.getCause() != null) {
 t = t.getCause();
   }
+} finally {
+  SessionState.get().setConf(oldConf);
 }
+
 if (registered == null) {
   console.printError("Failed to register " + registeredName
   + " using class " + createFunctionDesc.getClassName());

http://git-wip-us.apache.org/repos/asf/hive/blob/a9105ced/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java 
b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 6bb756c..89129f9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -135,7 +135,7 @@ public class SessionState {
   /**
* current configuration.
*/
-  private final HiveConf sessionConf;
+  private HiveConf sessionConf;
 
   /**
* silent mode.
@@ -308,6 +308,9 @@ public class SessionState {
 return sessionConf;
   }
 
+  public void setConf(HiveConf conf) {
+this.sessionConf = conf;
+  }
 
   public File getTmpOutputFile() {
 return tmpOutputFile;



hive git commit: HIVE-19381: Function replication in cloud fail when download resource from AWS (Daniel Dai, reviewed by Thejas Nair)

2018-05-14 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master f6c868a10 -> b96487071


HIVE-19381: Function replication in cloud fail when download resource from AWS 
(Daniel Dai, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b9648707
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b9648707
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b9648707

Branch: refs/heads/master
Commit: b96487071f7dc8fdf5aa5a44f90669024fbea9a8
Parents: f6c868a
Author: Daniel Dai 
Authored: Mon May 14 13:25:12 2018 -0700
Committer: Daniel Dai 
Committed: Mon May 14 13:25:12 2018 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java   | 6 ++
 .../java/org/apache/hadoop/hive/ql/session/SessionState.java   | 5 -
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b9648707/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java
index a0a90a9..b9d6f58 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java
@@ -30,6 +30,7 @@ import com.google.common.collect.Multimap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.ResourceType;
@@ -165,7 +166,9 @@ public class FunctionTask extends Task {
 checkLocalFunctionResources(db, createFunctionDesc.getResources());
 
 FunctionInfo registered = null;
+HiveConf oldConf = SessionState.get().getConf();
 try {
+  SessionState.get().setConf(conf);
   registered = FunctionRegistry.registerPermanentFunction(
 registeredName, className, true, toFunctionResource(resources));
 } catch (RuntimeException ex) {
@@ -173,7 +176,10 @@ public class FunctionTask extends Task {
   while (t.getCause() != null) {
 t = t.getCause();
   }
+} finally {
+  SessionState.get().setConf(oldConf);
 }
+
 if (registered == null) {
   console.printError("Failed to register " + registeredName
   + " using class " + createFunctionDesc.getClassName());

http://git-wip-us.apache.org/repos/asf/hive/blob/b9648707/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java 
b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 6bb756c..89129f9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -135,7 +135,7 @@ public class SessionState {
   /**
* current configuration.
*/
-  private final HiveConf sessionConf;
+  private HiveConf sessionConf;
 
   /**
* silent mode.
@@ -308,6 +308,9 @@ public class SessionState {
 return sessionConf;
   }
 
+  public void setConf(HiveConf conf) {
+this.sessionConf = conf;
+  }
 
   public File getTmpOutputFile() {
 return tmpOutputFile;
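
The fix is the classic swap-and-restore idiom: install the task's conf only for the duration of registerPermanentFunction, and restore the old conf in finally so the session is never left pointing at the wrong configuration, even when registration throws. The same idiom in a self-contained form (generic names; only the shape matches the FunctionTask change above):

    public class SwapAndRestoreSketch {
      private static String sessionValue = "old";

      static void withTemporaryValue(String temp, Runnable action) {
        String saved = sessionValue;
        try {
          sessionValue = temp;   // like SessionState.get().setConf(conf)
          action.run();          // like registerPermanentFunction(...)
        } finally {
          sessionValue = saved;  // restored on every exit path
        }
      }

      public static void main(String[] args) {
        withTemporaryValue("task-conf", () -> System.out.println(sessionValue));
        System.out.println(sessionValue); // prints "old"
      }
    }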



hive git commit: HIVE-19108 : Vectorization and Parquet: Turning on vectorization in parquet_ppd_decimal.q causes Wrong Query Results (Haifeng Chen reviewed by Matt McCline and Vihang Karajgaonkar)

2018-05-14 Thread vihangk1
Repository: hive
Updated Branches:
  refs/heads/branch-3 d33f79fe5 -> 6936c9c2a


HIVE-19108 : Vectorization and Parquet: Turning on vectorization in 
parquet_ppd_decimal.q causes Wrong Query Results (Haifeng Chen reviewed by Matt 
McCline and Vihang Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6936c9c2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6936c9c2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6936c9c2

Branch: refs/heads/branch-3
Commit: 6936c9c2a14c34665c4e8f20f8fc3b8b2af6afaa
Parents: d33f79f
Author: Vihang Karajgaonkar 
Authored: Thu May 10 08:25:53 2018 -0700
Committer: Vihang Karajgaonkar 
Committed: Mon May 14 12:54:59 2018 -0700

--
 .../vector/expressions/CastDecimalToFloat.java  |  43 ++
 .../apache/hadoop/hive/ql/udf/UDFToFloat.java   |   4 +-
 .../vector/expressions/TestVectorTypeCasts.java |  48 ++
 .../clientpositive/parquet_ppd_decimal.q|   1 -
 .../vectorization_parquet_ppd_decimal.q | 169 
 .../clientpositive/llap/vector_decimal_1.q.out  |   2 +-
 .../clientpositive/llap/vector_decimal_2.q.out  | 108 +--
 .../llap/vector_decimal_expressions.q.out   |   4 +-
 .../clientpositive/vector_decimal_1.q.out   |   2 +-
 .../vector_decimal_expressions.q.out|   4 +-
 .../vectorization_parquet_ppd_decimal.q.out | 766 +++
 11 files changed, 1088 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/6936c9c2/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToFloat.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToFloat.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToFloat.java
new file mode 100644
index 000..4ef5422
--- /dev/null
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToFloat.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.expressions;
+
+import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
+
+/**
+ * Cast a decimal to float based on decimal to double function.
+ *
+ */
+public class CastDecimalToFloat extends FuncDecimalToDouble {
+
+  private static final long serialVersionUID = 1L;
+
+  public CastDecimalToFloat() {
+    super();
+  }
+
+  public CastDecimalToFloat(int inputCol, int outputColumnNum) {
+    super(inputCol, outputColumnNum);
+  }
+
+  protected void func(DoubleColumnVector outV, DecimalColumnVector inV, int i) {
+    outV.vector[i] = inV.vector[i].floatValue();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/6936c9c2/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
index fd49d1f..2872ff2 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hive.ql.udf;
 
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.CastDecimalToDouble;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.CastDecimalToFloat;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.CastStringToFloat;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.CastLongToFloatViaLongToDouble;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.CastTimestampToDouble;
@@ -42,7 +42,7 @@ import org.apache.hadoop.io.Text;
  *
  */
 @VectorizedExpressions({CastTimestampToDouble.class, CastLongToFloatViaLongToDouble.class,
-

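For context on why a dedicated float cast matters (a self-contained sketch, not
code from this commit): a decimal widened to double compares differently against
a float-typed constant than the same decimal narrowed to float, which is the
class of wrong predicate-pushdown results this patch addresses.

    // Hedged illustration of the precision mismatch behind HIVE-19108.
    java.math.BigDecimal dec = new java.math.BigDecimal("0.22");
    double asDouble = dec.doubleValue();
    float asFloat = dec.floatValue();
    System.out.println(asDouble == 0.22f);  // false: double 0.22 != widened float
    System.out.println(asFloat == 0.22f);   // true: comparison at float precision
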
hive git commit: HIVE-19390 : Useless error messages logged for dummy table stats (Ashutosh Chauhan via Jesus Camacho Rodriguez)

2018-05-14 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master 50f52b728 -> f6c868a10


HIVE-19390 : Useless error messages logged for dummy table stats (Ashutosh 
Chauhan via Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f6c868a1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f6c868a1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f6c868a1

Branch: refs/heads/master
Commit: f6c868a10da04515e5b087b174a4fab7a433f745
Parents: 50f52b7
Author: Ashutosh Chauhan 
Authored: Wed May 2 17:32:00 2018 -0700
Committer: Ashutosh Chauhan 
Committed: Mon May 14 12:45:41 2018 -0700

--
 .../java/org/apache/hadoop/hive/ql/stats/StatsUtils.java | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f6c868a1/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index cef87f5..952b4ab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.metadata.PartitionIterable;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ColumnStatsList;
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hadoop.hive.ql.plan.ColStatistics;
 import org.apache.hadoop.hive.ql.plan.ColStatistics.Range;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
@@ -1054,8 +1055,8 @@ public class StatsUtils {
       cs.setAvgColLen(getAvgColLenOf(conf,cinfo.getObjectInspector(), cinfo.getTypeName()));
     } else if (colTypeLowerCase.equals(serdeConstants.BOOLEAN_TYPE_NAME)) {
       cs.setCountDistint(2);
-      cs.setNumTrues(Math.max(1, (long)numRows/2));
-      cs.setNumFalses(Math.max(1, (long)numRows/2));
+      cs.setNumTrues(Math.max(1, numRows/2));
+      cs.setNumFalses(Math.max(1, numRows/2));
       cs.setAvgColLen(JavaDataModel.get().primitive1());
     } else if (colTypeLowerCase.equals(serdeConstants.TIMESTAMP_TYPE_NAME) ||
         colTypeLowerCase.equals(serdeConstants.TIMESTAMPLOCALTZ_TYPE_NAME)) {
@@ -1117,6 +1118,12 @@ public class StatsUtils {
     // Retrieve stats from metastore
     String dbName = table.getDbName();
     String tabName = table.getTableName();
+    if (SemanticAnalyzer.DUMMY_DATABASE.equals(dbName) &&
+        SemanticAnalyzer.DUMMY_TABLE.equals(tabName)) {
+      // insert into values gets written into insert from select dummy_table
+      // This table is dummy and has no stats
+      return null;
+    }
     List stats = null;
     try {
       List colStat = Hive.get().getTableColumnStatistics(


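The guard added above keys off the internal dummy source that INSERT INTO ...
VALUES statements are rewritten against; a minimal sketch of the same check in
isolation (using the SemanticAnalyzer constants imported by the patch):

    // Hedged sketch: recognize the stats-less dummy source before asking HMS.
    static boolean isDummySource(String dbName, String tabName) {
      return SemanticAnalyzer.DUMMY_DATABASE.equals(dbName)
          && SemanticAnalyzer.DUMMY_TABLE.equals(tabName);
    }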

hive git commit: HIVE-19410 : don't create serde reader in LLAP if there's no cache (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

2018-05-14 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/branch-3 14d0690b6 -> d33f79fe5


HIVE-19410 : don't create serde reader in LLAP if there's no cache (Sergey 
Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d33f79fe
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d33f79fe
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d33f79fe

Branch: refs/heads/branch-3
Commit: d33f79fe5307fa7044efaba64d666acf9be3a72a
Parents: 14d0690
Author: sergey 
Authored: Mon May 7 12:09:04 2018 -0700
Committer: Matt McCline 
Committed: Mon May 14 14:11:34 2018 -0500

--
 .../java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java  | 3 ++-
 .../hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java   | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d33f79fe/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
--
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
index 747b399..5003d9b 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
@@ -133,7 +133,8 @@ public class LlapIoImpl implements LlapIo {
     MetadataCache metadataCache = null;
     SerDeLowLevelCacheImpl serdeCache = null; // TODO: extract interface when needed
     BufferUsageManager bufferManagerOrc = null, bufferManagerGeneric = null;
-    boolean isEncodeEnabled = HiveConf.getBoolVar(conf, ConfVars.LLAP_IO_ENCODE_ENABLED);
+    boolean isEncodeEnabled = useLowLevelCache
+        && HiveConf.getBoolVar(conf, ConfVars.LLAP_IO_ENCODE_ENABLED);
     if (useLowLevelCache) {
       // Memory manager uses cache policy to trigger evictions, so create the policy first.
       boolean useLrfu = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_USE_LRFU);

http://git-wip-us.apache.org/repos/asf/hive/blob/d33f79fe/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
--
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
index 8b89ae9..1cfe929 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
@@ -178,6 +178,7 @@ public class SerDeEncodedDataReader extends CallableWithNdc
       InputFormat sourceInputFormat, Deserializer sourceSerDe,
       QueryFragmentCounters counters, TypeDescription schema, Map parts)
       throws IOException {
+    assert cache != null;
     this.cache = cache;
     this.bufferManager = bufferManager;
     this.bufferFactory = new BufferObjectFactory() {


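The one-line change above is a flag-gating pattern: a dependent feature is
forced off whenever its prerequisite is off. A generic, hedged sketch with
hypothetical property keys (not Hive's):

    // Hedged sketch of the gating pattern.
    org.apache.hadoop.conf.Configuration config = new org.apache.hadoop.conf.Configuration();
    boolean cacheEnabled = config.getBoolean("io.cache.enabled", false);
    boolean encodeEnabled = cacheEnabled
        && config.getBoolean("io.encode.enabled", false);  // never on without the cache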

hive git commit: Revert "HIVE-13745: UDF current_date、current_timestamp、unix_timestamp NPE (Biao Wu, reviewed by Yongzhi Chen)"

2018-05-14 Thread ychena
Repository: hive
Updated Branches:
  refs/heads/master 0d787cbc0 -> 50f52b728


Revert "HIVE-13745: UDF current_date、current_timestamp、unix_timestamp NPE 
(Biao Wu, reviewed by Yongzhi Chen)"

This reverts commit fb79870592d775cd836d5611e21ab1c7030aadba.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/50f52b72
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/50f52b72
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/50f52b72

Branch: refs/heads/master
Commit: 50f52b728f911634e03b8ff6251c15edf3b987cb
Parents: 0d787cb
Author: Yongzhi Chen 
Authored: Mon May 14 14:56:15 2018 -0400
Committer: Yongzhi Chen 
Committed: Mon May 14 14:56:15 2018 -0400

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  1 -
 .../hadoop/hive/ql/session/SessionState.java|  1 -
 .../ql/udf/generic/GenericUDFCurrentDate.java   | 26 +--
 .../udf/generic/GenericUDFCurrentTimestamp.java | 26 +--
 .../ql/udf/generic/GenericUDFUnixTimeStamp.java | 27 +---
 5 files changed, 3 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/50f52b72/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 514257f..9df9cca 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1843,7 +1843,6 @@ public class HiveConf extends Configuration {
     TESTMODE_BUCKET_CODEC_VERSION("hive.test.bucketcodec.version", 1,
       "For testing only.  Will make ACID subsystem write RecordIdentifier.bucketId in specified\n" +
         "format", false),
-    HIVE_QUERY_TIMESTAMP("hive.query.timestamp", System.currentTimeMillis(), "query execute time."),
 
     HIVEMERGEMAPFILES("hive.merge.mapfiles", true,
         "Merge small files at the end of a map-only job"),

http://git-wip-us.apache.org/repos/asf/hive/blob/50f52b72/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 9f65a77..6bb756c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -1924,7 +1924,6 @@ public class SessionState {
    */
   public void setupQueryCurrentTimestamp() {
     queryCurrentTimestamp = new Timestamp(System.currentTimeMillis());
-    sessionConf.setLongVar(ConfVars.HIVE_QUERY_TIMESTAMP, queryCurrentTimestamp.getTime());
 
     // Provide a facility to set current timestamp during tests
     if (sessionConf.getBoolVar(ConfVars.HIVE_IN_TEST)) {

http://git-wip-us.apache.org/repos/asf/hive/blob/50f52b72/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCurrentDate.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCurrentDate.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCurrentDate.java
index 91fd08f..7d3c3f4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCurrentDate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCurrentDate.java
@@ -18,12 +18,8 @@
 package org.apache.hadoop.hive.ql.udf.generic;
 
 import java.sql.Date;
-import java.sql.Timestamp;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.Description;
-import org.apache.hadoop.hive.ql.exec.MapredContext;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -43,13 +39,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn
 public class GenericUDFCurrentDate extends GenericUDF {
 
   protected DateWritable currentDate;
-  private Configuration conf;
-
-  @Override
-  public void configure(MapredContext context) {
-    super.configure(context);
-    conf = context.getJobConf();
-  }
 
   @Override
   public ObjectInspector initialize(ObjectInspector[] arguments)
@@ -61,21 +50,8 @@ public class GenericUDFCurrentDate extends GenericUDF {
     }
 
     if (currentDate == null) {
-      SessionState ss = SessionState.get();
-      Timestamp queryTimestamp;
-      if (ss == null) {
-        if (conf == null) {
-          queryTimestamp = new 

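The removed branch existed because SessionState is thread-local and can be null
in task-side execution; a hedged sketch of the null-safe lookup the reverted
patch performed (the fallback shown here is ours, not Hive's):

    // Hedged sketch: UDFs must not assume a session exists on this thread.
    SessionState ss = SessionState.get();            // may be null task-side
    long queryTimeMillis = (ss != null)
        ? ss.getQueryCurrentTimestamp().getTime()    // driver-side path
        : System.currentTimeMillis();                // hedged fallback
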
hive git commit: HIVE-19374: Parse and process ALTER TABLE SET OWNER command syntax (Sergio Pena, reviewed by Vihang Karajgaonkar)

2018-05-14 Thread vgarg
Repository: hive
Updated Branches:
  refs/heads/branch-3 ba7155d33 -> 14d0690b6


HIVE-19374: Parse and process ALTER TABLE SET OWNER command syntax (Sergio 
Pena, reviewed by Vihang Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/14d0690b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/14d0690b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/14d0690b

Branch: refs/heads/branch-3
Commit: 14d0690b6af32c1aed8085590f49af941a64bc1c
Parents: ba7155d
Author: Sergio Pena 
Authored: Wed May 9 22:37:09 2018 -0700
Committer: Vineet Garg 
Committed: Mon May 14 11:50:50 2018 -0700

--
 .../ql/metadata/TestAlterTableMetadata.java | 72 
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  5 ++
 .../apache/hadoop/hive/ql/metadata/Table.java   | 15 
 .../formatting/JsonMetaDataFormatter.java   |  1 +
 .../formatting/MetaDataFormatUtils.java |  1 +
 .../hive/ql/parse/DDLSemanticAnalyzer.java  | 17 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g|  8 +++
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |  2 +
 .../hadoop/hive/ql/plan/AlterTableDesc.java | 21 +-
 .../hadoop/hive/ql/plan/HiveOperation.java  |  1 +
 .../authorization/plugin/HiveOperationType.java |  1 +
 .../plugin/sqlstd/Operation2Privilege.java  |  2 +
 12 files changed, 145 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/14d0690b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestAlterTableMetadata.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestAlterTableMetadata.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestAlterTableMetadata.java
new file mode 100644
index 0000000..940a1d3
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestAlterTableMetadata.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.metadata;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.ql.DriverFactory;
+import org.apache.hadoop.hive.ql.IDriver;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestAlterTableMetadata {
+  @Test
+  public void testAlterTableOwner() throws HiveException {
+    /*
+     * This test verifies that the ALTER TABLE ... SET OWNER command will change the
+     * owner metadata of the table in HMS.
+     */
+
+    HiveConf conf = new HiveConf(this.getClass());
+    conf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+    SessionState.start(conf);
+    IDriver driver = DriverFactory.newDriver(conf);
+    CommandProcessorResponse resp;
+    Table table;
+
+    resp = driver.run("create table t1(id int)");
+    assertEquals(0, resp.getResponseCode());
+
+    // Changes the owner to a user and verify the change
+    resp = driver.run("alter table t1 set owner user u1");
+    assertEquals(0, resp.getResponseCode());
+
+    table = Hive.get(conf).getTable("t1");
+    assertEquals(PrincipalType.USER, table.getOwnerType());
+    assertEquals("u1", table.getOwner());
+
+    // Changes the owner to a group and verify the change
+    resp = driver.run("alter table t1 set owner group g1");
+    assertEquals(0, resp.getResponseCode());
+
+    table = Hive.get(conf).getTable("t1");
+    assertEquals(PrincipalType.GROUP, table.getOwnerType());
+    assertEquals("g1", table.getOwner());
+
+    // Changes the owner to a role and verify the change
+    resp = driver.run("alter table t1 set owner role r1");
+    assertEquals(0, resp.getResponseCode());
+
+    table = Hive.get(conf).getTable("t1");
+

hive git commit: HIVE-19466: Update constraint violation error message (Vineet Garg, reviewed by Jesus Camacho Rodriguez)

2018-05-14 Thread vgarg
Repository: hive
Updated Branches:
  refs/heads/branch-3 8c12a11b7 -> ba7155d33


HIVE-19466: Update constraint violation error message (Vineet Garg, reviewed by
Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ba7155d3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ba7155d3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ba7155d3

Branch: refs/heads/branch-3
Commit: ba7155d33ba6598bfad99403af76296fc52483f4
Parents: 8c12a11
Author: Vineet Garg 
Authored: Thu May 10 14:12:04 2018 -0700
Committer: Vineet Garg 
Committed: Mon May 14 11:46:19 2018 -0700

--
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |  2 +-
 .../generic/GenericUDFEnforceConstraint.java| 77 
 .../GenericUDFEnforceNotNullConstraint.java | 77 
 .../TestGenericUDFEnforceConstraint.java| 75 +++
 .../TestGenericUDFEnforceNotNullConstraint.java | 75 ---
 .../alter_notnull_constraint_violation.q.out|  2 +-
 .../insert_into_acid_notnull.q.out  |  2 +-
 .../insert_into_notnull_constraint.q.out|  2 +-
 .../insert_multi_into_notnull.q.out |  2 +-
 .../insert_overwrite_notnull_constraint.q.out   |  2 +-
 .../update_notnull_constraint.q.out |  2 +-
 11 files changed, 159 insertions(+), 159 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ba7155d3/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 4611ce9..a1f549a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -214,7 +214,7 @@ public final class FunctionRegistry {
     system.registerUDF("rand", UDFRand.class, false);
     system.registerGenericUDF("abs", GenericUDFAbs.class);
     system.registerGenericUDF("sq_count_check", GenericUDFSQCountCheck.class);
-    system.registerGenericUDF("enforce_constraint", GenericUDFEnforceNotNullConstraint.class);
+    system.registerGenericUDF("enforce_constraint", GenericUDFEnforceConstraint.class);
     system.registerGenericUDF("pmod", GenericUDFPosMod.class);
 
     system.registerUDF("ln", UDFLn.class, false);

http://git-wip-us.apache.org/repos/asf/hive/blob/ba7155d3/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceConstraint.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceConstraint.java
new file mode 100644
index 0000000..aa0059b
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceConstraint.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
+import org.apache.hadoop.hive.ql.exec.errors.DataConstraintViolationError;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.BooleanWritable;
+
+/**
+ * GenericUDFAbs.
+ *
+ */
+@Description(name = "enforce_constraint",
+    value = "_FUNC_(x) - Internal UDF to enforce CHECK and NOT NULL constraint",
+    extended = "For internal use only")
+public class GenericUDFEnforceConstraint extends GenericUDF {
+  private 

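The file body is truncated above; per the @Description, enforce_constraint(x)
passes a true argument through and aborts the query otherwise. A self-contained,
hedged sketch of that contract (not the committed implementation; the error
class's (String) constructor is an assumption):

    import org.apache.hadoop.hive.ql.exec.errors.DataConstraintViolationError;

    // Hedged sketch of the UDF's contract, outside the GenericUDF plumbing.
    public class EnforceConstraintSketch {
      static boolean enforce(Boolean constraintHolds) {
        if (constraintHolds != null && !constraintHolds) {
          // Assumed ctor signature; message text is ours.
          throw new DataConstraintViolationError("NOT NULL or CHECK constraint violated");
        }
        return true;
      }

      public static void main(String[] args) {
        enforce(true);   // passes through
        enforce(false);  // aborts with DataConstraintViolationError
      }
    }
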
hive git commit: HIVE-19433: HiveJoinPushTransitivePredicatesRule hangs (Vineet Garg, reviewed by Jesus Camacho Rodriguez)

2018-05-14 Thread vgarg
Repository: hive
Updated Branches:
  refs/heads/branch-3 71d211d2d -> 8c12a11b7


HIVE-19433: HiveJoinPushTransitivePredicatesRule hangs (Vineet Garg, reviewed by
Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8c12a11b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8c12a11b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8c12a11b

Branch: refs/heads/branch-3
Commit: 8c12a11b762f5954f75eb35c10cb740dab632554
Parents: 71d211d
Author: Vineet Garg 
Authored: Mon May 14 11:37:05 2018 -0700
Committer: Vineet Garg 
Committed: Mon May 14 11:40:36 2018 -0700

--
 .../calcite/stats/HiveRelMdPredicates.java  |   6 +-
 .../queries/clientpositive/infer_join_preds.q   | 222 +++
 .../clientpositive/infer_join_preds.q.out   | 594 +++
 3 files changed, 820 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8c12a11b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
index 0b1fe74..a137bdf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
@@ -642,7 +642,7 @@ public class HiveRelMdPredicates implements MetadataHandler

http://git-wip-us.apache.org/repos/asf/hive/blob/8c12a11b/ql/src/test/queries/clientpositive/infer_join_preds.q
--
diff --git a/ql/src/test/queries/clientpositive/infer_join_preds.q b/ql/src/test/queries/clientpositive/infer_join_preds.q
index c2e0d09..a9f5468 100644
--- a/ql/src/test/queries/clientpositive/infer_join_preds.q
+++ b/ql/src/test/queries/clientpositive/infer_join_preds.q
@@ -59,3 +59,225 @@ select * from
 (select * from src)a
 right outer join
 (select * from src1 where 1 = 0)b on a.key = b.key;
+
+explain select * from src join src1 on src.key = src1.key and src.value = src1.value
+where 4 between src.key and src.value;
+
+CREATE TABLE `table1`(
+   `idp_warehouse_id` bigint,
+   `idp_audit_id` bigint,
+   `idp_effective_date` date,
+   `idp_end_date` date,
+   `idp_delete_date` date,
+   `pruid` varchar(32),
+   `prid` bigint,
+   `prtimesheetid` bigint,
+   `prassignmentid` bigint,
+   `prchargecodeid` bigint,
+   `prtypecodeid` bigint,
+   `prsequence` bigint,
+   `prmodby` varchar(96),
+   `prmodtime` timestamp,
+   `prrmexported` bigint,
+   `prrmckdel` bigint,
+   `slice_status` int,
+   `role_id` bigint,
+   `user_lov1` varchar(30),
+   `user_lov2` varchar(30),
+   `incident_id` bigint,
+   `incident_investment_id` bigint,
+   `odf_ss_actuals` bigint,
+   `practsum` decimal(38,20));
+
+CREATE TABLE `table2`(
+   `idp_warehouse_id` bigint,
+   `idp_audit_id` bigint,
+   `idp_effective_date` date,
+   `idp_end_date` date,
+   `idp_delete_date` date,
+   `pruid` varchar(32),
+   `prid` bigint,
+   `prtimesheetid` bigint,
+   `prassignmentid` bigint,
+   `prchargecodeid` bigint,
+   `prtypecodeid` bigint,
+   `prsequence` bigint,
+   `prmodby` varchar(96),
+   `prmodtime` timestamp,
+   `prrmexported` bigint,
+   `prrmckdel` bigint,
+   `slice_status` int,
+   `role_id` bigint,
+   `user_lov1` varchar(30),
+   `user_lov2` varchar(30),
+   `incident_id` bigint,
+   `incident_investment_id` bigint,
+   `odf_ss_actuals` bigint,
+   `practsum` decimal(38,20));
+
+explain SELECT  s.idp_warehouse_id AS source_warehouse_id
+FROMtable1 s
+JOIN
+
+   table2 d
+ON  (
+s.prid = d.prid )
+JOIN
+ table2 e
+ON
+s.prid = e.prid
+WHERE
+concat(
+CASE
+WHEN s.prid IS NULL THEN 1
+ELSE s.prid
+END,',',
+CASE
+WHEN s.prtimesheetid IS NULL THEN 1
+ELSE s.prtimesheetid
+END,',',
+CASE
+WHEN s.prassignmentid IS NULL THEN 1
+ELSE s.prassignmentid
+END,',',
+

hive git commit: HIVE-19433: HiveJoinPushTransitivePredicatesRule hangs (Vineet Garg, reviewed by Jesus Camacho Rodriguez)

2018-05-14 Thread vgarg
Repository: hive
Updated Branches:
  refs/heads/master 8657a1c33 -> 0d787cbc0


HIVE-19433: HiveJoinPushTransitivePredicatesRule hangs (Vineet Garg, reviewed by
Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0d787cbc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0d787cbc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0d787cbc

Branch: refs/heads/master
Commit: 0d787cbc055eb237bcccd5fdbc144fb6b1d7d4ca
Parents: 8657a1c
Author: Vineet Garg 
Authored: Mon May 14 11:37:05 2018 -0700
Committer: Vineet Garg 
Committed: Mon May 14 11:37:05 2018 -0700

--
 .../calcite/stats/HiveRelMdPredicates.java  |   6 +-
 .../queries/clientpositive/infer_join_preds.q   | 222 +++
 .../clientpositive/infer_join_preds.q.out   | 594 +++
 3 files changed, 820 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0d787cbc/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
index 0b1fe74..a137bdf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
@@ -642,7 +642,7 @@ public class HiveRelMdPredicates implements MetadataHandler

http://git-wip-us.apache.org/repos/asf/hive/blob/0d787cbc/ql/src/test/queries/clientpositive/infer_join_preds.q
--
diff --git a/ql/src/test/queries/clientpositive/infer_join_preds.q b/ql/src/test/queries/clientpositive/infer_join_preds.q
index 89827a2..4787de1 100644
--- a/ql/src/test/queries/clientpositive/infer_join_preds.q
+++ b/ql/src/test/queries/clientpositive/infer_join_preds.q
@@ -61,3 +61,225 @@ select * from
 (select * from src)a
 right outer join
 (select * from src1 where 1 = 0)b on a.key = b.key;
+
+explain select * from src join src1 on src.key = src1.key and src.value = src1.value
+where 4 between src.key and src.value;
+
+CREATE TABLE `table1`(
+   `idp_warehouse_id` bigint,
+   `idp_audit_id` bigint,
+   `idp_effective_date` date,
+   `idp_end_date` date,
+   `idp_delete_date` date,
+   `pruid` varchar(32),
+   `prid` bigint,
+   `prtimesheetid` bigint,
+   `prassignmentid` bigint,
+   `prchargecodeid` bigint,
+   `prtypecodeid` bigint,
+   `prsequence` bigint,
+   `prmodby` varchar(96),
+   `prmodtime` timestamp,
+   `prrmexported` bigint,
+   `prrmckdel` bigint,
+   `slice_status` int,
+   `role_id` bigint,
+   `user_lov1` varchar(30),
+   `user_lov2` varchar(30),
+   `incident_id` bigint,
+   `incident_investment_id` bigint,
+   `odf_ss_actuals` bigint,
+   `practsum` decimal(38,20));
+
+CREATE TABLE `table2`(
+   `idp_warehouse_id` bigint,
+   `idp_audit_id` bigint,
+   `idp_effective_date` date,
+   `idp_end_date` date,
+   `idp_delete_date` date,
+   `pruid` varchar(32),
+   `prid` bigint,
+   `prtimesheetid` bigint,
+   `prassignmentid` bigint,
+   `prchargecodeid` bigint,
+   `prtypecodeid` bigint,
+   `prsequence` bigint,
+   `prmodby` varchar(96),
+   `prmodtime` timestamp,
+   `prrmexported` bigint,
+   `prrmckdel` bigint,
+   `slice_status` int,
+   `role_id` bigint,
+   `user_lov1` varchar(30),
+   `user_lov2` varchar(30),
+   `incident_id` bigint,
+   `incident_investment_id` bigint,
+   `odf_ss_actuals` bigint,
+   `practsum` decimal(38,20));
+
+explain SELECT  s.idp_warehouse_id AS source_warehouse_id
+FROMtable1 s
+JOIN
+
+   table2 d
+ON  (
+s.prid = d.prid )
+JOIN
+ table2 e
+ON
+s.prid = e.prid
+WHERE
+concat(
+CASE
+WHEN s.prid IS NULL THEN 1
+ELSE s.prid
+END,',',
+CASE
+WHEN s.prtimesheetid IS NULL THEN 1
+ELSE s.prtimesheetid
+END,',',
+CASE
+WHEN s.prassignmentid IS NULL THEN 1
+ELSE s.prassignmentid
+END,',',
+

hive git commit: HIVE-19474 : Decimal type should be casted as part of the CTAS or INSERT Clause. (Slim Bouguerra via Jesus Camacho Rodriguez)

2018-05-14 Thread vgarg
Repository: hive
Updated Branches:
  refs/heads/branch-3 1db0521b5 -> 71d211d2d


HIVE-19474 : Decimal type should be casted as part of the CTAS or INSERT 
Clause. (Slim Bouguerra via Jesus Camacho Rodriguez)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/71d211d2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/71d211d2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/71d211d2

Branch: refs/heads/branch-3
Commit: 71d211d2dbf53031da27aec562b4fba48939841d
Parents: 1db0521
Author: Slim Bouguerra 
Authored: Mon May 14 09:34:14 2018 -0700
Committer: Vineet Garg 
Committed: Mon May 14 11:13:14 2018 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  3 -
 .../hive/druid/DruidStorageHandlerUtils.java| 15 ++---
 .../hadoop/hive/druid/serde/DruidSerDe.java | 60 
 3 files changed, 28 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/71d211d2/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 3bb1e80..e56c14f 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2682,9 +2682,6 @@ public class HiveConf extends Configuration {
         "Wait time in ms default to 30 seconds."
     ),
     HIVE_DRUID_BITMAP_FACTORY_TYPE("hive.druid.bitmap.type", "roaring", new PatternSet("roaring", "concise"), "Coding algorithm use to encode the bitmaps"),
-    HIVE_DRUID_APPROX_RESULT("hive.druid.approx.result", false,
-        "Whether to allow approximate results from druid. \n" +
-        "When set to true decimals will be stored as double and druid is allowed to return approximate results for decimal columns."),
     // For HBase storage handler
     HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true,
         "Whether writes to HBase should be forced to the write-ahead log. \n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/71d211d2/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
--
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index 93d3e5c..076f00a 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -817,8 +817,6 @@ public final class DruidStorageHandlerUtils {
     // Default, all columns that are not metrics or timestamp, are treated as dimensions
     final List dimensions = new ArrayList<>();
     ImmutableList.Builder aggregatorFactoryBuilder = ImmutableList.builder();
-    final boolean approximationAllowed = HiveConf
-        .getBoolVar(jc, HiveConf.ConfVars.HIVE_DRUID_APPROX_RESULT);
     for (int i = 0; i < columnTypes.size(); i++) {
       final PrimitiveObjectInspector.PrimitiveCategory primitiveCategory = ((PrimitiveTypeInfo) columnTypes
           .get(i)).getPrimitiveCategory();
@@ -835,15 +833,10 @@ public final class DruidStorageHandlerUtils {
         af = new DoubleSumAggregatorFactory(columnNames.get(i), columnNames.get(i));
         break;
       case DECIMAL:
-        if (approximationAllowed) {
-          af = new DoubleSumAggregatorFactory(columnNames.get(i), columnNames.get(i));
-        } else {
-          throw new UnsupportedOperationException(
-              String.format("Druid does not support decimal column type." +
-                  "Either cast column [%s] to double or Enable Approximate Result for Druid by setting property [%s] to true",
-                  columnNames.get(i), HiveConf.ConfVars.HIVE_DRUID_APPROX_RESULT.varname));
-        }
-        break;
+        throw new UnsupportedOperationException(
+            String.format("Druid does not support decimal column type cast column "
+                + "[%s] to double", columnNames.get(i)));
+
       case TIMESTAMP:
         // Granularity column
         String tColumnName = columnNames.get(i);

http://git-wip-us.apache.org/repos/asf/hive/blob/71d211d2/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
--
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
index 

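The new exception tells users to cast explicitly; a hedged sketch of the CTAS
the message asks for, written in the same IDriver style as the tests elsewhere
in this digest (table and column names are ours):

    // Hedged sketch: Druid-backed CTAS with the decimal column cast to double.
    CommandProcessorResponse resp = driver.run(
        "create table druid_tbl "
        + "stored by 'org.apache.hadoop.hive.druid.DruidStorageHandler' as "
        + "select `__time`, cast(price as double) as price from src_tbl");
    assertEquals(0, resp.getResponseCode());
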
hive git commit: HIVE-19474 : Decimal type should be casted as part of the CTAS or INSERT Clause. (Slim Bouguerra via Jesus Camacho Rodriguez)

2018-05-14 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master 985d6f812 -> 8657a1c33


HIVE-19474 : Decimal type should be casted as part of the CTAS or INSERT 
Clause. (Slim Bouguerra via Jesus Camacho Rodriguez)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8657a1c3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8657a1c3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8657a1c3

Branch: refs/heads/master
Commit: 8657a1c338df6593e8f4c8b6b699fdeba24694bc
Parents: 985d6f8
Author: Slim Bouguerra 
Authored: Mon May 14 09:34:14 2018 -0700
Committer: Ashutosh Chauhan 
Committed: Mon May 14 09:34:14 2018 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  3 -
 .../hive/druid/DruidStorageHandlerUtils.java| 15 ++---
 .../hadoop/hive/druid/serde/DruidSerDe.java | 60 
 3 files changed, 28 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8657a1c3/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 44b9eb2..514257f 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2688,9 +2688,6 @@ public class HiveConf extends Configuration {
         "Wait time in ms default to 30 seconds."
     ),
     HIVE_DRUID_BITMAP_FACTORY_TYPE("hive.druid.bitmap.type", "roaring", new PatternSet("roaring", "concise"), "Coding algorithm use to encode the bitmaps"),
-    HIVE_DRUID_APPROX_RESULT("hive.druid.approx.result", false,
-        "Whether to allow approximate results from druid. \n" +
-        "When set to true decimals will be stored as double and druid is allowed to return approximate results for decimal columns."),
     // For HBase storage handler
     HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true,
         "Whether writes to HBase should be forced to the write-ahead log. \n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/8657a1c3/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
--
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index 93d3e5c..076f00a 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -817,8 +817,6 @@ public final class DruidStorageHandlerUtils {
     // Default, all columns that are not metrics or timestamp, are treated as dimensions
     final List dimensions = new ArrayList<>();
     ImmutableList.Builder aggregatorFactoryBuilder = ImmutableList.builder();
-    final boolean approximationAllowed = HiveConf
-        .getBoolVar(jc, HiveConf.ConfVars.HIVE_DRUID_APPROX_RESULT);
     for (int i = 0; i < columnTypes.size(); i++) {
       final PrimitiveObjectInspector.PrimitiveCategory primitiveCategory = ((PrimitiveTypeInfo) columnTypes
           .get(i)).getPrimitiveCategory();
@@ -835,15 +833,10 @@ public final class DruidStorageHandlerUtils {
         af = new DoubleSumAggregatorFactory(columnNames.get(i), columnNames.get(i));
         break;
       case DECIMAL:
-        if (approximationAllowed) {
-          af = new DoubleSumAggregatorFactory(columnNames.get(i), columnNames.get(i));
-        } else {
-          throw new UnsupportedOperationException(
-              String.format("Druid does not support decimal column type." +
-                  "Either cast column [%s] to double or Enable Approximate Result for Druid by setting property [%s] to true",
-                  columnNames.get(i), HiveConf.ConfVars.HIVE_DRUID_APPROX_RESULT.varname));
-        }
-        break;
+        throw new UnsupportedOperationException(
+            String.format("Druid does not support decimal column type cast column "
+                + "[%s] to double", columnNames.get(i)));
+
       case TIMESTAMP:
         // Granularity column
         String tColumnName = columnNames.get(i);

http://git-wip-us.apache.org/repos/asf/hive/blob/8657a1c3/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
--
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java

hive git commit: HIVE-19515 : TestRpc.testServerPort is consistently failing (Sahil Takiar via Jesus Camacho Rodriguez)

2018-05-14 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master 5d3c5387e -> 985d6f812


HIVE-19515 : TestRpc.testServerPort is consistently failing (Sahil Takiar via 
Jesus Camacho Rodriguez)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/985d6f81
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/985d6f81
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/985d6f81

Branch: refs/heads/master
Commit: 985d6f8125c363ed28954a684bd86eb622ff9ba1
Parents: 5d3c538
Author: Sahil Takiar 
Authored: Mon May 14 09:24:53 2018 -0700
Committer: Ashutosh Chauhan 
Committed: Mon May 14 09:24:53 2018 -0700

--
 .../test/java/org/apache/hive/spark/client/rpc/TestRpc.java| 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/985d6f81/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java
--
diff --git a/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java b/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java
index fafdff7..5653e4d 100644
--- a/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java
+++ b/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java
@@ -197,8 +197,10 @@ public class TestRpc {
     try {
       autoClose(new RpcServer(config));
       assertTrue("Invalid port range should throw an exception", false); // Should not reach here
-    } catch(IOException e) {
-      assertEquals("Incorrect RPC server port configuration for HiveServer2", e.getMessage());
+    } catch(IllegalArgumentException e) {
+      assertEquals(
+          "Malformed configuration value for " + HiveConf.ConfVars.SPARK_RPC_SERVER_PORT.varname,
+          e.getMessage());
     }
 
     // Retry logic



hive git commit: HIVE-19509: Disable tests that are failing continuously (Jesus Camacho Rodriguez) (addendum)

2018-05-14 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 4d2cd44a4 -> 5d3c5387e


HIVE-19509: Disable tests that are failing continuously (Jesus Camacho 
Rodriguez) (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5d3c5387
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5d3c5387
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5d3c5387

Branch: refs/heads/master
Commit: 5d3c5387ea56015516f357365a521d901366ad9a
Parents: 4d2cd44
Author: Jesus Camacho Rodriguez 
Authored: Mon May 14 09:17:16 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Mon May 14 09:17:16 2018 -0700

--
 .../main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java| 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/5d3c5387/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
--
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
index a94a41b..1814f0f 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
@@ -224,6 +224,7 @@ public class CliConfigs {
         excludeQuery("special_character_in_tabnames_1.q");
         excludeQuery("sysdb.q");
         excludeQuery("tez_smb_1.q");
+        excludeQuery("union_fast_stats.q");
 
         setResultsDir("ql/src/test/results/clientpositive/llap");
         setLogDir("itests/qtest/target/qfile-results/clientpositive");


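A hedged sketch of what excludeQuery() amounts to (the real CliConfigs
internals may differ): excluded qfiles are filtered out of the set the
generated driver parameterizes over.

    // Hedged sketch: drop excluded qfiles from the run list.
    java.util.List<String> allQFiles = java.util.List.of("mm_bhif.q", "union_fast_stats.q");
    java.util.Set<String> excluded = java.util.Set.of("union_fast_stats.q");
    java.util.List<String> toRun = allQFiles.stream()
        .filter(q -> !excluded.contains(q))
        .collect(java.util.stream.Collectors.toList());
    System.out.println(toRun);  // [mm_bhif.q]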

hive git commit: HIVE-19509: Disable tests that are failing continuously (Jesus Camacho Rodriguez)

2018-05-14 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 411bfdb0b -> 4d2cd44a4


HIVE-19509: Disable tests that are failing continuously (Jesus Camacho 
Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4d2cd44a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4d2cd44a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4d2cd44a

Branch: refs/heads/master
Commit: 4d2cd44a423d7271b3e26eff6b0e9c46544fd311
Parents: 411bfdb
Author: Jesus Camacho Rodriguez 
Authored: Mon May 14 08:55:54 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Mon May 14 09:12:57 2018 -0700

--
 .../hadoop/hive/cli/control/CliConfigs.java |   4 +
 .../results/clientpositive/llap/mm_bhif.q.out   | 131 +++
 2 files changed, 135 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4d2cd44a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
--
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
index 85f41bc..a94a41b 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
@@ -220,6 +220,10 @@ public class CliConfigs {
 
         includesFrom(testConfigProps, "minillaplocal.query.files");
         includesFrom(testConfigProps, "minillaplocal.shared.query.files");
+        excludeQuery("bucket_map_join_tez1.q");
+        excludeQuery("special_character_in_tabnames_1.q");
+        excludeQuery("sysdb.q");
+        excludeQuery("tez_smb_1.q");
 
         setResultsDir("ql/src/test/results/clientpositive/llap");
         setLogDir("itests/qtest/target/qfile-results/clientpositive");

http://git-wip-us.apache.org/repos/asf/hive/blob/4d2cd44a/ql/src/test/results/clientpositive/llap/mm_bhif.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/mm_bhif.q.out b/ql/src/test/results/clientpositive/llap/mm_bhif.q.out
new file mode 100644
index 0000000..f6a7ed3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/mm_bhif.q.out
@@ -0,0 +1,131 @@
+PREHOOK: query: CREATE TABLE T1_mm(key STRING, val STRING) PARTITIONED BY (ds 
string)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE  
tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T1_mm
+POSTHOOK: query: CREATE TABLE T1_mm(key STRING, val STRING) PARTITIONED BY (ds 
string)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE  
tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1_mm
+PREHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/bucket_files/00_0' INTO TABLE T1_mm  PARTITION (ds='1')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t1_mm
+POSTHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/bucket_files/00_0' INTO TABLE T1_mm  PARTITION (ds='1')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@t1_mm
+POSTHOOK: Output: default@t1_mm@ds=1
+PREHOOK: query: INSERT OVERWRITE TABLE T1_mm PARTITION (ds='1') select key, 
val from T1_mm where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_mm
+PREHOOK: Input: default@t1_mm@ds=1
+PREHOOK: Output: default@t1_mm@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE T1_mm PARTITION (ds='1') select key, 
val from T1_mm where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_mm
+POSTHOOK: Input: default@t1_mm@ds=1
+POSTHOOK: Output: default@t1_mm@ds=1
+POSTHOOK: Lineage: t1_mm PARTITION(ds=1).key SIMPLE 
[(t1_mm)t1_mm.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t1_mm PARTITION(ds=1).val SIMPLE 
[(t1_mm)t1_mm.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: select * from T1_mm
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_mm
+PREHOOK: Input: default@t1_mm@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T1_mm
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_mm
+POSTHOOK: Input: default@t1_mm@ds=1
+#### A masked pattern was here ####
+1  11  1
+2  12  1
+3  13  1
+7  17  1
+8  18  1
+8  28  1
+PREHOOK: query: explain
+select count(distinct key) from T1_mm
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(distinct key) from T1_mm

hive git commit: HIVE-19159: TestMTQueries.testMTQueries1 failure (Laszlo Bodor via Zoltan Haindrich)

2018-05-14 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/branch-3 685fc9f4c -> 1db0521b5


HIVE-19159: TestMTQueries.testMTQueries1 failure (Laszlo Bodor via Zoltan 
Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1db0521b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1db0521b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1db0521b

Branch: refs/heads/branch-3
Commit: 1db0521b593fd1e354c75fd719a98d231ded84d6
Parents: 685fc9f
Author: Laszlo Bodor 
Authored: Mon May 14 15:46:50 2018 +0200
Committer: Zoltan Haindrich 
Committed: Mon May 14 15:46:50 2018 +0200

--
 .../apache/hadoop/hive/ql/TestMTQueries.java|  2 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java| 93 +++-
 2 files changed, 50 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1db0521b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java
index 4838856..6ed872d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java
@@ -42,7 +42,7 @@ public class TestMTQueries extends BaseTestQueries {
       // derby fails creating multiple stats aggregator concurrently
       util.getConf().setBoolean("hive.exec.submitviachild", true);
       util.getConf().setBoolean("hive.exec.submit.local.task.via.child", true);
-      util.getConf().setBoolean("hive.vectorized.execution.enabled", false);
+      util.getConf().setBoolean("hive.vectorized.execution.enabled", true);
       util.getConf().set("hive.stats.dbclass", "fs");
       util.getConf().set("hive.mapred.mode", "nonstrict");
       util.getConf().set("hive.stats.column.autogather", "false");

http://git-wip-us.apache.org/repos/asf/hive/blob/1db0521b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
--
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 58680fe..f6729a9 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -36,6 +36,7 @@ import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.Serializable;
 import java.io.StringWriter;
+import java.io.UnsupportedEncodingException;
 import java.net.URL;
 import java.nio.charset.StandardCharsets;
 import java.sql.SQLException;
@@ -1200,11 +1201,13 @@ public class QTestUtil {
 
     DatasetCollection datasets = parser.getDatasets();
     for (String table : datasets.getTables()){
-      initDataset(table);
+      synchronized (QTestUtil.class){
+        initDataset(table);
+      }
     }
   }
 
-  private void initDataset(String table) {
+  protected void initDataset(String table) {
     if (getSrcTables().contains(table)){
       return;
     }
@@ -1270,7 +1273,7 @@
     initDataSetForTest(file);
 
     HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
-      "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
+        "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
     Utilities.clearWorkMap(conf);
     CliSessionState ss = new CliSessionState(conf);
     assert ss != null;
@@ -1287,6 +1290,30 @@
     }
 
     File outf = new File(logDir, stdoutName);
+
+    setSessionOutputs(fileName, ss, outf);
+
+    SessionState oldSs = SessionState.get();
+
+    boolean canReuseSession = !qNoSessionReuseQuerySet.contains(fileName);
+    restartSessions(canReuseSession, ss, oldSs);
+
+    closeSession(oldSs);
+
+    SessionState.start(ss);
+
+    cliDriver = new CliDriver();
+
+    if (fileName.equals("init_file.q")) {
+      ss.initFiles.add(AbstractCliConfig.HIVE_ROOT + "/data/scripts/test_init_file.sql");
+    }
+    cliDriver.processInitFiles(ss);
+
+    return outf.getAbsolutePath();
+  }
+
+  private void setSessionOutputs(String fileName, CliSessionState ss, File outf)
+      throws FileNotFoundException, Exception, UnsupportedEncodingException {
     OutputStream fo = new BufferedOutputStream(new FileOutputStream(outf));
     if (qSortQuerySet.contains(fileName)) {
       ss.out = new SortPrintStream(fo, "UTF-8");
@@ -1299,10 +1326,12 @@
     }
     ss.err = new CachingPrintStream(fo, true, "UTF-8");
 

hive git commit: HIVE-19159: TestMTQueries.testMTQueries1 failure (Laszlo Bodor via Zoltan Haindrich)

2018-05-14 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 0a70831c9 -> 411bfdb0b


HIVE-19159: TestMTQueries.testMTQueries1 failure (Laszlo Bodor via Zoltan 
Haindrich)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/411bfdb0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/411bfdb0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/411bfdb0

Branch: refs/heads/master
Commit: 411bfdb0b80a59e7c84a665ea1c864ec28e3fe9b
Parents: 0a70831
Author: Laszlo Bodor 
Authored: Mon May 14 15:45:52 2018 +0200
Committer: Zoltan Haindrich 
Committed: Mon May 14 15:45:52 2018 +0200

--
 .../apache/hadoop/hive/ql/TestMTQueries.java|  2 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java| 93 +++-
 2 files changed, 50 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/411bfdb0/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java
index 4838856..6ed872d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java
@@ -42,7 +42,7 @@ public class TestMTQueries extends BaseTestQueries {
       // derby fails creating multiple stats aggregator concurrently
       util.getConf().setBoolean("hive.exec.submitviachild", true);
       util.getConf().setBoolean("hive.exec.submit.local.task.via.child", true);
-      util.getConf().setBoolean("hive.vectorized.execution.enabled", false);
+      util.getConf().setBoolean("hive.vectorized.execution.enabled", true);
       util.getConf().set("hive.stats.dbclass", "fs");
      util.getConf().set("hive.mapred.mode", "nonstrict");
      util.getConf().set("hive.stats.column.autogather", "false");

http://git-wip-us.apache.org/repos/asf/hive/blob/411bfdb0/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
--
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 750fc69..16571b3 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -36,6 +36,7 @@ import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.Serializable;
 import java.io.StringWriter;
+import java.io.UnsupportedEncodingException;
 import java.net.URL;
 import java.nio.charset.StandardCharsets;
 import java.sql.SQLException;
@@ -1204,11 +1205,13 @@ public class QTestUtil {
 
     DatasetCollection datasets = parser.getDatasets();
     for (String table : datasets.getTables()){
-      initDataset(table);
+      synchronized (QTestUtil.class){
+        initDataset(table);
+      }
     }
   }
 
-  protected synchronized void initDataset(String table) {
+  protected void initDataset(String table) {
     if (getSrcTables().contains(table)){
       return;
     }
@@ -1275,7 +1278,7 @@
     initDataSetForTest(file);
 
     HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
-      "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
+        "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
     Utilities.clearWorkMap(conf);
     CliSessionState ss = new CliSessionState(conf);
     assert ss != null;
@@ -1292,6 +1295,30 @@
     }
 
     File outf = new File(logDir, stdoutName);
+
+    setSessionOutputs(fileName, ss, outf);
+
+    SessionState oldSs = SessionState.get();
+
+    boolean canReuseSession = !qNoSessionReuseQuerySet.contains(fileName);
+    restartSessions(canReuseSession, ss, oldSs);
+
+    closeSession(oldSs);
+
+    SessionState.start(ss);
+
+    cliDriver = new CliDriver();
+
+    if (fileName.equals("init_file.q")) {
+      ss.initFiles.add(AbstractCliConfig.HIVE_ROOT + "/data/scripts/test_init_file.sql");
+    }
+    cliDriver.processInitFiles(ss);
+
+    return outf.getAbsolutePath();
+  }
+
+  private void setSessionOutputs(String fileName, CliSessionState ss, File outf)
+      throws FileNotFoundException, Exception, UnsupportedEncodingException {
     OutputStream fo = new BufferedOutputStream(new FileOutputStream(outf));
     if (qSortQuerySet.contains(fileName)) {
       ss.out = new SortPrintStream(fo, "UTF-8");
@@ -1304,10 +1331,12 @@
     }
     ss.err = new CachingPrintStream(fo, true, "UTF-8");

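The synchronized block added to initDataSetForTest above serializes dataset
loads across the concurrently running sessions of TestMTQueries; a minimal,
hedged sketch of that guarded-init pattern (the loader name is ours):

    // Hedged sketch: one class-wide lock so each dataset loads exactly once
    // even when several test drivers run in parallel.
    private static final java.util.Set<String> loaded = new java.util.HashSet<>();

    void initDatasetOnce(String table) {
      synchronized (QTestUtil.class) {   // same lock object as the patch
        if (loaded.add(table)) {         // first caller loads, others skip
          loadDataset(table);            // hypothetical loader
        }
      }
    }
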
hive git commit: HIVE-18906: Lower Logging for "Using direct SQL" (Antal Sinkovits via Peter Vary)

2018-05-14 Thread pvary
Repository: hive
Updated Branches:
  refs/heads/master 7d7c18396 -> 0a70831c9


HIVE-18906: Lower Logging for "Using direct SQL" (Antal Sinkovits via Peter 
Vary)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0a70831c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0a70831c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0a70831c

Branch: refs/heads/master
Commit: 0a70831c99706a5ba4a18262f1aafb232eefdf68
Parents: 7d7c183
Author: Antal Sinkovits 
Authored: Mon May 14 14:00:41 2018 +0200
Committer: Peter Vary 
Committed: Mon May 14 14:00:41 2018 +0200

--
 .../java/org/apache/hadoop/hive/metastore/HiveMetaStore.java  | 7 +++
 .../org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java  | 2 +-
 .../java/org/apache/hadoop/hive/metastore/ObjectStore.java| 2 ++
 3 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0a70831c/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index a2b8743..92d2e3f 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -653,6 +653,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
   setHMSHandler(this);
   configuration.set(key, value);
   notifyMetaListeners(key, oldValue, value);
+
+  if (ConfVars.TRY_DIRECT_SQL == confVar) {
+HMSHandler.LOG.info("Direct SQL optimization = {}",  value);
+  }
 }
 
 @Override
@@ -8893,6 +8897,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
   HMSHandler.LOG.info("TCP keepalive = " + tcpKeepAlive);
   HMSHandler.LOG.info("Enable SSL = " + useSSL);
 
+  boolean directSqlEnabled = MetastoreConf.getBoolVar(conf, ConfVars.TRY_DIRECT_SQL);
+  HMSHandler.LOG.info("Direct SQL optimization = {}",  directSqlEnabled);
+
   if (startLock != null) {
 signalOtherThreadsToStart(tServer, startLock, startCondition, startedServing);
   }
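
Two log points are added in this file: the earlier hunk logs from the config-change handler so a runtime toggle of TRY_DIRECT_SQL leaves a trace, and the startup path above records the initial value. The handler pattern, reduced to its essentials (names are illustrative; HMSHandler.LOG is an SLF4J logger, so the {} placeholder defers formatting):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Sketch of the change-handler pattern above; names are illustrative.
    class ConfChangeLogSketch {
      private static final Logger LOG = LoggerFactory.getLogger(ConfChangeLogSketch.class);
      private static final String WATCHED_KEY = "metastore.try.direct.sql";

      void onConfChange(String key, String oldValue, String newValue) {
        // ... apply the change and notify listeners, as above ...
        if (WATCHED_KEY.equals(key)) {
          // Parameterized logging: the message is formatted only if INFO is on.
          LOG.info("Direct SQL optimization = {}", newValue);
        }
      }
    }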

http://git-wip-us.apache.org/repos/asf/hive/blob/0a70831c/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 56fbfed..48f77b9 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -182,7 +182,7 @@ class MetaStoreDirectSql {
   boolean isInTest = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST);
   isCompatibleDatastore = (!isInTest || ensureDbInit()) && runTestQuery();
   if (isCompatibleDatastore) {
-LOG.info("Using direct SQL, underlying DB is " + dbType);
+LOG.debug("Using direct SQL, underlying DB is " + dbType);
   }
 }
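
Moving this message from INFO to DEBUG silences a line printed on every MetaStoreDirectSql construction. Worth noting: with string concatenation the message is still built even when DEBUG is off; a guard or SLF4J parameter substitution avoids that cost. A small sketch:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class DirectSqlLogSketch {
      private static final Logger LOG = LoggerFactory.getLogger(DirectSqlLogSketch.class);

      void logCompatible(String dbType) {
        // Guarded form: skip building the message when DEBUG is disabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Using direct SQL, underlying DB is " + dbType);
        }
        // Parameterized form achieves the same without an explicit guard.
        LOG.debug("Using direct SQL, underlying DB is {}", dbType);
      }
    }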
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0a70831c/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index b0a805f..264fdb9 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -3490,6 +3490,7 @@ public class ObjectStore implements RawStore, 
Configurable {
   try {
 directSql.prepareTxn();
 this.results = getSqlResult(this);
+LOG.debug("Using direct SQL optimization.");
   } catch (Exception ex) {
 handleDirectSqlError(ex);
   }
@@ -3499,6 +3500,7 @@ public class ObjectStore implements RawStore, 
Configurable {
 //2) DirectSQL threw and was disabled in handleDirectSqlError.
 if (!doUseDirectSql) {
   this.results = getJdoResult(this);
+  LOG.debug("Not using direct SQL optimization.");
 }
 return commit();
   } catch (NoSuchObjectException ex) {
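
The two debug lines mark the two arms of ObjectStore's usual retrieval pattern: try the hand-written direct SQL first; if it throws (or handleDirectSqlError disabled it), fall back to the JDO/DataNucleus path. A stripped-down sketch of the pattern, with Supplier standing in for the getSqlResult/getJdoResult callbacks (the shape is illustrative, not ObjectStore's actual GetHelper API):

    import java.util.function.Supplier;

    class FallbackSketch<T> {
      T run(boolean tryDirectSql, Supplier<T> directSql, Supplier<T> jdo) {
        T results = null;
        boolean usedDirectSql = false;
        if (tryDirectSql) {
          try {
            results = directSql.get();
            usedDirectSql = true;      // "Using direct SQL optimization."
          } catch (RuntimeException ex) {
            // The real code routes this through handleDirectSqlError, which
            // may disable direct SQL for the remainder of the operation.
          }
        }
        if (!usedDirectSql) {
          results = jdo.get();         // "Not using direct SQL optimization."
        }
        return results;
      }
    }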



hive git commit: HIVE-19077: Handle duplicate ptests requests standing in queue at the same time (Adam Szita via Peter Vary)

2018-05-14 Thread pvary
Repository: hive
Updated Branches:
  refs/heads/master 1542c88d5 -> 7d7c18396


HIVE-19077: Handle duplicate ptests requests standing in queue at the same time (Adam Szita via Peter Vary)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7d7c1839
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7d7c1839
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7d7c1839

Branch: refs/heads/master
Commit: 7d7c1839625b7be9846b75c373efb557d03cf3d8
Parents: 1542c88
Author: Adam Szita 
Authored: Mon May 14 10:49:36 2018 +0200
Committer: Peter Vary 
Committed: Mon May 14 10:49:36 2018 +0200

--
 dev-support/jenkins-common.sh   |   2 -
 dev-support/jenkins-execute-build.sh|   3 +-
 .../hive/ptest/api/client/JenkinsQueueUtil.java | 143 ---
 .../hive/ptest/api/client/PTestClient.java  |  11 +-
 .../org/apache/hive/ptest/execution/PTest.java  |   3 +-
 .../hive/ptest/execution/TestCheckPhase.java|  22 ++-
 .../ptest/execution/TestTestCheckPhase.java |  36 -
 .../src/test/resources/HIVE-19077.1.patch   |  14 ++
 8 files changed, 68 insertions(+), 166 deletions(-)
--
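
Per the diffstat, client-side Jenkins queue polling (JenkinsQueueUtil and the --jenkinsQueueUrl option) goes away and duplicate handling moves into the ptest server's TestCheckPhase. The new TestCheckPhase body is not part of this excerpt, so the following is only a hypothetical sketch of the idea: admit a request only if no identical one is already standing in the queue.

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    // Hypothetical sketch; the actual TestCheckPhase logic is not shown here.
    class DuplicateRequestFilter {
      private final Set<String> inFlight = ConcurrentHashMap.newKeySet();

      // Returns false when an identical request is already queued or running.
      boolean tryAdmit(String jiraId, String attachmentId) {
        return inFlight.add(jiraId + "#" + attachmentId);
      }

      void finished(String jiraId, String attachmentId) {
        inFlight.remove(jiraId + "#" + attachmentId);
      }
    }

ConcurrentHashMap.newKeySet() makes the add atomic, so two identical requests arriving at the same moment cannot both be admitted.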


http://git-wip-us.apache.org/repos/asf/hive/blob/7d7c1839/dev-support/jenkins-common.sh
--
diff --git a/dev-support/jenkins-common.sh b/dev-support/jenkins-common.sh
index 64f486f..0467d11 100644
--- a/dev-support/jenkins-common.sh
+++ b/dev-support/jenkins-common.sh
@@ -15,8 +15,6 @@
 # limitations under the License.
 
 JIRA_ROOT_URL="https://issues.apache.org"
-JENKINS_URL="https://builds.apache.org"
-JENKINS_QUEUE_QUERY="/queue/api/json?tree=items[task[name],inQueueSince,actions[parameters[name,value]],why]"
 
 fail() {
   echo "$@" 1>&2

http://git-wip-us.apache.org/repos/asf/hive/blob/7d7c1839/dev-support/jenkins-execute-build.sh
--
diff --git a/dev-support/jenkins-execute-build.sh 
b/dev-support/jenkins-execute-build.sh
index 35392dd..f660fcb 100644
--- a/dev-support/jenkins-execute-build.sh
+++ b/dev-support/jenkins-execute-build.sh
@@ -51,8 +51,7 @@ call_ptest_server() {
 local PTEST_CLASSPATH="$PTEST_BUILD_DIR/hive/testutils/ptest2/target/hive-ptest-3.0-classes.jar:$PTEST_BUILD_DIR/hive/testutils/ptest2/target/lib/*"

 java -cp "$PTEST_CLASSPATH" org.apache.hive.ptest.api.client.PTestClient --command testStart \
-   --outputDir "$PTEST_BUILD_DIR/hive/testutils/ptest2/target" --password "$JIRA_PASSWORD" \
-   --jenkinsQueueUrl "$JENKINS_URL$JENKINS_QUEUE_QUERY" "$@"
+   --outputDir "$PTEST_BUILD_DIR/hive/testutils/ptest2/target" --password "$JIRA_PASSWORD" "$@"
 }
 
 # Unpack all test results

http://git-wip-us.apache.org/repos/asf/hive/blob/7d7c1839/testutils/ptest2/src/main/java/org/apache/hive/ptest/api/client/JenkinsQueueUtil.java
--
diff --git 
a/testutils/ptest2/src/main/java/org/apache/hive/ptest/api/client/JenkinsQueueUtil.java
 
b/testutils/ptest2/src/main/java/org/apache/hive/ptest/api/client/JenkinsQueueUtil.java
deleted file mode 100644
index f335164..000
--- 
a/testutils/ptest2/src/main/java/org/apache/hive/ptest/api/client/JenkinsQueueUtil.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hive.ptest.api.client;
-
-import java.io.IOException;
-import java.security.KeyManagementException;
-import java.security.NoSuchAlgorithmException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.http.HttpResponse;
-import org.apache.http.StatusLine;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.apache.http.ssl.SSLContexts;
-import org.apache.http.util.EntityUtils;
-

hive git commit: Addendum to HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly

2018-05-14 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/branch-3 e2d4f3476 -> 685fc9f4c


Addendum to HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/685fc9f4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/685fc9f4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/685fc9f4

Branch: refs/heads/branch-3
Commit: 685fc9f4c1b7d3d49b262e2280fec63f3d52bcb9
Parents: e2d4f34
Author: Matt McCline 
Authored: Mon May 14 02:10:00 2018 -0500
Committer: Matt McCline 
Committed: Mon May 14 02:10:41 2018 -0500

--
 .../IfExprIntervalDayTimeColumnColumn.java  | 178 --
 .../IfExprIntervalDayTimeColumnScalar.java  | 164 
 .../IfExprIntervalDayTimeScalarColumn.java  | 185 ---
 .../IfExprIntervalDayTimeScalarScalar.java  | 170 -
 .../IfExprTimestampColumnColumn.java|  54 --
 .../IfExprTimestampColumnColumnBase.java| 148 ---
 .../IfExprTimestampColumnScalar.java|  59 --
 .../IfExprTimestampColumnScalarBase.java| 143 --
 .../IfExprTimestampScalarColumn.java|  59 --
 .../IfExprTimestampScalarColumnBase.java| 169 -
 .../IfExprTimestampScalarScalar.java|  59 --
 .../IfExprTimestampScalarScalarBase.java| 149 ---
 12 files changed, 1537 deletions(-)
--
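
The twelve deleted files are the hand-written IfExpr* classes whose NULL handling HIVE-19384 found faulty; they are superseded by classes generated from the IfExprObject*.txt templates added in the parent commit, which copy the selected branch's per-row null flag. The per-row rule those templates implement, sketched over plain arrays rather than Hive's ColumnVector machinery:

    // Sketch of IF(cond, a, b) NULL propagation per row; the real code works
    // on ColumnVector.isNull/vector fields with batching, selection vectors
    // and isRepeating shortcuts.
    public class IfExprNullSketch {
      static void ifExprRow(boolean[] condIsNull, boolean[] cond,
          boolean[] aIsNull, long[] a, boolean[] bIsNull, long[] b,
          boolean[] outIsNull, long[] out, int n) {
        for (int i = 0; i < n; i++) {
          // SQL semantics: a NULL condition is not true, so it picks the ELSE arm.
          boolean takeThen = !condIsNull[i] && cond[i];
          outIsNull[i] = takeThen ? aIsNull[i] : bIsNull[i]; // copy the branch's flag
          if (!outIsNull[i]) {
            out[i] = takeThen ? a[i] : b[i];
          }
        }
      }
    }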


http://git-wip-us.apache.org/repos/asf/hive/blob/685fc9f4/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java
deleted file mode 100644
index 13e5fff..000
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.exec.vector.expressions;
-
-import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-
-/**
- * Compute IF(expr1, expr2, expr3) for 3 input column expressions.
- * The first is always a boolean (LongColumnVector).
- * The second and third are long columns or long expression results.
- */
-public class IfExprIntervalDayTimeColumnColumn extends VectorExpression {
-
-  private static final long serialVersionUID = 1L;
-
-  private final int arg1Column;
-  private final int arg2Column;
-  private final int arg3Column;
-
-  public IfExprIntervalDayTimeColumnColumn(int arg1Column, int arg2Column, int arg3Column,
-  int outputColumnNum) {
-super(outputColumnNum);
-this.arg1Column = arg1Column;
-this.arg2Column = arg2Column;
-this.arg3Column = arg3Column;
-  }
-
-  public IfExprIntervalDayTimeColumnColumn() {
-super();
-
-// Dummy final assignments.
-arg1Column = -1;
-arg2Column = -1;
-arg3Column = -1;
-  }
-
-  @Override
-  public void evaluate(VectorizedRowBatch batch) throws HiveException {
-
-if (childExpressions != null) {
-  super.evaluateChildren(batch);
-}
-
-LongColumnVector arg1ColVector = (LongColumnVector) batch.cols[arg1Column];
-IntervalDayTimeColumnVector arg2ColVector = (IntervalDayTimeColumnVector) batch.cols[arg2Column];
-IntervalDayTimeColumnVector arg3ColVector = (IntervalDayTimeColumnVector) batch.cols[arg3Column];
-IntervalDayTimeColumnVector outputColVector = 
(IntervalDayTimeColumnVector) 

hive git commit: Addendum to HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly

2018-05-14 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/master f3beace2e -> 1542c88d5


Addendum to HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1542c88d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1542c88d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1542c88d

Branch: refs/heads/master
Commit: 1542c88d5ff9bb6a81491031918561d24dee59c6
Parents: f3beace
Author: Matt McCline 
Authored: Mon May 14 02:10:00 2018 -0500
Committer: Matt McCline 
Committed: Mon May 14 02:10:00 2018 -0500

--
 .../IfExprIntervalDayTimeColumnColumn.java  | 178 --
 .../IfExprIntervalDayTimeColumnScalar.java  | 164 
 .../IfExprIntervalDayTimeScalarColumn.java  | 185 ---
 .../IfExprIntervalDayTimeScalarScalar.java  | 170 -
 .../IfExprTimestampColumnColumn.java|  54 --
 .../IfExprTimestampColumnColumnBase.java| 148 ---
 .../IfExprTimestampColumnScalar.java|  59 --
 .../IfExprTimestampColumnScalarBase.java| 143 --
 .../IfExprTimestampScalarColumn.java|  59 --
 .../IfExprTimestampScalarColumnBase.java| 169 -
 .../IfExprTimestampScalarScalar.java|  59 --
 .../IfExprTimestampScalarScalarBase.java| 149 ---
 12 files changed, 1537 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1542c88d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java
deleted file mode 100644
index 13e5fff..000
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.exec.vector.expressions;
-
-import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-
-/**
- * Compute IF(expr1, expr2, expr3) for 3 input column expressions.
- * The first is always a boolean (LongColumnVector).
- * The second and third are long columns or long expression results.
- */
-public class IfExprIntervalDayTimeColumnColumn extends VectorExpression {
-
-  private static final long serialVersionUID = 1L;
-
-  private final int arg1Column;
-  private final int arg2Column;
-  private final int arg3Column;
-
-  public IfExprIntervalDayTimeColumnColumn(int arg1Column, int arg2Column, int arg3Column,
-  int outputColumnNum) {
-super(outputColumnNum);
-this.arg1Column = arg1Column;
-this.arg2Column = arg2Column;
-this.arg3Column = arg3Column;
-  }
-
-  public IfExprIntervalDayTimeColumnColumn() {
-super();
-
-// Dummy final assignments.
-arg1Column = -1;
-arg2Column = -1;
-arg3Column = -1;
-  }
-
-  @Override
-  public void evaluate(VectorizedRowBatch batch) throws HiveException {
-
-if (childExpressions != null) {
-  super.evaluateChildren(batch);
-}
-
-LongColumnVector arg1ColVector = (LongColumnVector) batch.cols[arg1Column];
-IntervalDayTimeColumnVector arg2ColVector = (IntervalDayTimeColumnVector) batch.cols[arg2Column];
-IntervalDayTimeColumnVector arg3ColVector = (IntervalDayTimeColumnVector) batch.cols[arg3Column];
-IntervalDayTimeColumnVector outputColVector = 
(IntervalDayTimeColumnVector) 

[3/4] hive git commit: HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly (Matt McCline, reviewed by Teddy Choi)

2018-05-14 Thread mmccline
http://git-wip-us.apache.org/repos/asf/hive/blob/e2d4f347/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java
 
b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java
new file mode 100644
index 000..c52ca19
--- /dev/null
+++ 
b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java
@@ -0,0 +1,444 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.expressions;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.hive.common.type.DataTypePhysicalVariation;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
+import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory;
+import org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorRandomBatchSource;
+import org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIf;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFWhen;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.LongWritable;
+
+import junit.framework.Assert;
+
+import org.junit.Test;
+
+public class TestVectorIfStatement {
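+  // Every test below seeds Random with the same fixed value (12882) so the
+  // generated input batches are reproducible from run to run; doIfTests(random,
+  // typeName) is the shared driver (its body falls outside this excerpt) that
+  // exercises IF expressions over random batches of the named Hive type.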
+
+  @Test
+  public void testBoolean() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "boolean");
+  }
+
+  @Test
+  public void testInt() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "int");
+  }
+
+  @Test
+  public void testBigInt() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "bigint");
+  }
+
+  @Test
+  public void testString() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "string");
+  }
+
+  @Test
+  public void testTimestamp() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "timestamp");
+  }
+
+  @Test
+  public void testDate() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "date");
+  }
+
+  @Test
+  public void testIntervalDayTime() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "interval_day_time");
+  }
+
+  @Test
+  public void testIntervalYearMonth() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "interval_year_month");
+  }
+
+  @Test
+  public void testDouble() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "double");
+  }
+
+  @Test
+  public void testChar() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "char(10)");
+  }
+
+  @Test
+  public void testVarchar() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "varchar(15)");
+  }
+
+  @Test
+  public void testBinary() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "binary");
+  }
+
+  @Test
+  public void 

[1/4] hive git commit: HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly (Matt McCline, reviewed by Teddy Choi)

2018-05-14 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/branch-3 e941bea80 -> e2d4f3476


http://git-wip-us.apache.org/repos/asf/hive/blob/e2d4f347/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out 
b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
index 244aca6..01e915b 100644
--- a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
@@ -1,15 +1,19 @@
-PREHOOK: query: CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, 
stimestamp1 string) STORED AS ORC
+PREHOOK: query: CREATE TABLE alltypesorc_string(cboolean1 boolean, ctimestamp1 
timestamp, stimestamp1 string,
+ctimestamp2 timestamp) STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@alltypesorc_string
-POSTHOOK: query: CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, 
stimestamp1 string) STORED AS ORC
+POSTHOOK: query: CREATE TABLE alltypesorc_string(cboolean1 boolean, 
ctimestamp1 timestamp, stimestamp1 string,
+ctimestamp2 timestamp) STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@alltypesorc_string
 PREHOOK: query: INSERT OVERWRITE TABLE alltypesorc_string
 SELECT
+  cboolean1,
   to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS toutc,
-  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst
+  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst,
+  ctimestamp2
 FROM alltypesorc
 ORDER BY toutc, cst
 LIMIT 40
@@ -18,16 +22,146 @@ PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@alltypesorc_string
 POSTHOOK: query: INSERT OVERWRITE TABLE alltypesorc_string
 SELECT
+  cboolean1,
   to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS toutc,
-  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst
+  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst,
+  ctimestamp2
 FROM alltypesorc
 ORDER BY toutc, cst
 LIMIT 40
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, 
comment:null), ]
 POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 EXPRESSION 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, 
comment:null), ]
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, 
comment:null), ]
 POSTHOOK: Lineage: alltypesorc_string.stimestamp1 EXPRESSION 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, 
comment:null), ]
+PREHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', '1978-08-05 14:41:05.501', '1999-10-03 
16:59:10.396903939')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@alltypesorc_string
+POSTHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', '1978-08-05 14:41:05.501', '1999-10-03 
16:59:10.396903939')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.stimestamp1 SCRIPT []
+PREHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, null, 
'2013-04-10 00:43:46.8547315', null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@alltypesorc_string
+POSTHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, null, 
'2013-04-10 00:43:46.8547315', null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 EXPRESSION []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 EXPRESSION []
+POSTHOOK: Lineage: alltypesorc_string.stimestamp1 SCRIPT []
+PREHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', null, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@alltypesorc_string
+POSTHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', null, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 SCRIPT []
+POSTHOOK: Lineage: 

[4/4] hive git commit: HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly (Matt McCline, reviewed by Teddy Choi)

2018-05-14 Thread mmccline
HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly (Matt McCline, reviewed by Teddy Choi)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e2d4f347
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e2d4f347
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e2d4f347

Branch: refs/heads/branch-3
Commit: e2d4f3476f67331e3690903ad12a30aeb47126d0
Parents: e941bea
Author: Matt McCline 
Authored: Mon May 14 01:57:12 2018 -0500
Committer: Matt McCline 
Committed: Mon May 14 01:58:43 2018 -0500

--
 .../IfExprObjectColumnColumn.txt| 217 +
 .../IfExprObjectColumnScalar.txt| 194 
 .../IfExprObjectScalarColumn.txt| 196 
 .../IfExprObjectScalarScalar.txt| 166 +++
 .../ql/exec/vector/VectorizationContext.java|   5 -
 .../expressions/IfExprLongColumnLongColumn.java |   7 +-
 .../hive/ql/udf/generic/GenericUDFIf.java   |  16 +-
 .../exec/vector/TestVectorizationContext.java   |   8 +-
 .../ql/exec/vector/VectorRandomBatchSource.java | 311 +
 .../ql/exec/vector/VectorRandomRowSource.java   | 312 +
 .../expressions/TestVectorIfStatement.java  | 444 +++
 .../clientpositive/vectorized_timestamp_funcs.q |  48 +-
 .../llap/vectorized_timestamp_funcs.q.out   | 384 
 .../spark/vectorized_timestamp_funcs.q.out  | 384 
 .../vectorized_timestamp_funcs.q.out| 382 
 .../hive/ql/exec/vector/VectorizedRowBatch.java |  91 ++--
 .../ql/exec/vector/TestStructColumnVector.java  |   4 +-
 .../apache/hadoop/hive/tools/GenVectorCode.java |  55 +++
 18 files changed, 2813 insertions(+), 411 deletions(-)
--
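
The IfExprObject*.txt files are templates, not compiled sources: GenVectorCode.java (extended by 55 lines in this commit) expands each template into concrete classes such as IfExprTimestampColumnColumn by substituting the angle-bracket tokens visible in the template text below. A simplified sketch of that expansion step; the token names and method shape are assumptions, as GenVectorCode's actual code is outside this excerpt:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;

    // Assumed token names (<ClassName>, <ColumnVectorType>) mirror the template below.
    class TemplateExpandSketch {
      static void expand(Path template, Path outDir,
          String className, String columnVectorType) throws IOException {
        String src = new String(Files.readAllBytes(template), StandardCharsets.UTF_8)
            .replace("<ClassName>", className)
            .replace("<ColumnVectorType>", columnVectorType);
        Files.write(outDir.resolve(className + ".java"),
            src.getBytes(StandardCharsets.UTF_8));
      }
    }

Expanding with, say, ("IfExprTimestampColumnColumn", "TimestampColumnVector") regenerates one of the classes that the addendum commits above delete from the hand-written tree.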


http://git-wip-us.apache.org/repos/asf/hive/blob/e2d4f347/ql/src/gen/vectorization/ExpressionTemplates/IfExprObjectColumnColumn.txt
--
diff --git 
a/ql/src/gen/vectorization/ExpressionTemplates/IfExprObjectColumnColumn.txt 
b/ql/src/gen/vectorization/ExpressionTemplates/IfExprObjectColumnColumn.txt
new file mode 100644
index 000..e8ef279
--- /dev/null
+++ b/ql/src/gen/vectorization/ExpressionTemplates/IfExprObjectColumnColumn.txt
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
+
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.<ColumnVectorType>;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Compute IF(expr1, expr2, expr3) for 3 input column expressions.
+ * The first is always a boolean (LongColumnVector).
+ * The second and third are object columns (e.g. timestamp or interval) or object expression results.
+ */
+public class <ClassName> extends VectorExpression {
+
+  private static final long serialVersionUID = 1L;
+
+  private final int arg1Column;
+  private final int arg2Column;
+  private final int arg3Column;
+
+  public <ClassName>(int arg1Column, int arg2Column, int arg3Column,
+  int outputColumnNum) {
+super(outputColumnNum);
+this.arg1Column = arg1Column;
+this.arg2Column = arg2Column;
+this.arg3Column = arg3Column;
+  }
+
+  public <ClassName>() {
+super();
+
+// Dummy final assignments.
+arg1Column = -1;
+arg2Column = -1;
+arg3Column = -1;
+  }
+
+  @Override
+  public void evaluate(VectorizedRowBatch batch) throws HiveException {
+
+if (childExpressions != null) {
+  super.evaluateChildren(batch);
+}
+
+LongColumnVector arg1ColVector = (LongColumnVector) batch.cols[arg1Column];
+<ColumnVectorType> arg2ColVector = (<ColumnVectorType>) batch.cols[arg2Column];
+boolean[] arg2IsNull = arg2ColVector.isNull;
+<ColumnVectorType> arg3ColVector = (<ColumnVectorType>) batch.cols[arg3Column];
+boolean[] arg3IsNull = 

[2/4] hive git commit: HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly (Matt McCline, reviewed by Teddy Choi)

2018-05-14 Thread mmccline
http://git-wip-us.apache.org/repos/asf/hive/blob/e2d4f347/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out 
b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
index 68b89a7..fe5fd23 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
@@ -1,15 +1,19 @@
-PREHOOK: query: CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, 
stimestamp1 string) STORED AS ORC
+PREHOOK: query: CREATE TABLE alltypesorc_string(cboolean1 boolean, ctimestamp1 
timestamp, stimestamp1 string,
+ctimestamp2 timestamp) STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@alltypesorc_string
-POSTHOOK: query: CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, 
stimestamp1 string) STORED AS ORC
+POSTHOOK: query: CREATE TABLE alltypesorc_string(cboolean1 boolean, 
ctimestamp1 timestamp, stimestamp1 string,
+ctimestamp2 timestamp) STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@alltypesorc_string
 PREHOOK: query: INSERT OVERWRITE TABLE alltypesorc_string
 SELECT
+  cboolean1,
   to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS toutc,
-  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst
+  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst,
+  ctimestamp2
 FROM alltypesorc
 ORDER BY toutc, cst
 LIMIT 40
@@ -18,16 +22,146 @@ PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@alltypesorc_string
 POSTHOOK: query: INSERT OVERWRITE TABLE alltypesorc_string
 SELECT
+  cboolean1,
   to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS toutc,
-  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst
+  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst,
+  ctimestamp2
 FROM alltypesorc
 ORDER BY toutc, cst
 LIMIT 40
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, 
comment:null), ]
 POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 EXPRESSION 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, 
comment:null), ]
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, 
comment:null), ]
 POSTHOOK: Lineage: alltypesorc_string.stimestamp1 EXPRESSION 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, 
comment:null), ]
+PREHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', '1978-08-05 14:41:05.501', '1999-10-03 
16:59:10.396903939')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@alltypesorc_string
+POSTHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', '1978-08-05 14:41:05.501', '1999-10-03 
16:59:10.396903939')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.stimestamp1 SCRIPT []
+PREHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, null, 
'2013-04-10 00:43:46.8547315', null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@alltypesorc_string
+POSTHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, null, 
'2013-04-10 00:43:46.8547315', null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 EXPRESSION []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 EXPRESSION []
+POSTHOOK: Lineage: alltypesorc_string.stimestamp1 SCRIPT []
+PREHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', null, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@alltypesorc_string
+POSTHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', null, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 EXPRESSION []
+POSTHOOK: 

[4/4] hive git commit: HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly (Matt McCline, reviewed by Teddy Choi)

2018-05-14 Thread mmccline
HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly (Matt McCline, reviewed by Teddy Choi)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f3beace2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f3beace2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f3beace2

Branch: refs/heads/master
Commit: f3beace2ebfa989cc1d8a4f491cb705bf58ecd82
Parents: f327624
Author: Matt McCline 
Authored: Mon May 14 01:57:12 2018 -0500
Committer: Matt McCline 
Committed: Mon May 14 01:57:12 2018 -0500

--
 .../IfExprObjectColumnColumn.txt| 217 +
 .../IfExprObjectColumnScalar.txt| 194 
 .../IfExprObjectScalarColumn.txt| 196 
 .../IfExprObjectScalarScalar.txt| 166 +++
 .../ql/exec/vector/VectorizationContext.java|   5 -
 .../expressions/IfExprLongColumnLongColumn.java |   7 +-
 .../hive/ql/udf/generic/GenericUDFIf.java   |  16 +-
 .../exec/vector/TestVectorizationContext.java   |   8 +-
 .../ql/exec/vector/VectorRandomBatchSource.java | 311 +
 .../ql/exec/vector/VectorRandomRowSource.java   | 312 +
 .../expressions/TestVectorIfStatement.java  | 444 +++
 .../clientpositive/vectorized_timestamp_funcs.q |  49 +-
 .../llap/vectorized_timestamp_funcs.q.out   | 384 
 .../spark/vectorized_timestamp_funcs.q.out  | 384 
 .../vectorized_timestamp_funcs.q.out| 382 
 .../hive/ql/exec/vector/VectorizedRowBatch.java |  91 ++--
 .../ql/exec/vector/TestStructColumnVector.java  |   4 +-
 .../apache/hadoop/hive/tools/GenVectorCode.java |  55 +++
 18 files changed, 2814 insertions(+), 411 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f3beace2/ql/src/gen/vectorization/ExpressionTemplates/IfExprObjectColumnColumn.txt
--
diff --git 
a/ql/src/gen/vectorization/ExpressionTemplates/IfExprObjectColumnColumn.txt 
b/ql/src/gen/vectorization/ExpressionTemplates/IfExprObjectColumnColumn.txt
new file mode 100644
index 000..e8ef279
--- /dev/null
+++ b/ql/src/gen/vectorization/ExpressionTemplates/IfExprObjectColumnColumn.txt
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
+
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.<ColumnVectorType>;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Compute IF(expr1, expr2, expr3) for 3 input column expressions.
+ * The first is always a boolean (LongColumnVector).
+ * The second and third are object columns (e.g. timestamp or interval) or object expression results.
+ */
+public class <ClassName> extends VectorExpression {
+
+  private static final long serialVersionUID = 1L;
+
+  private final int arg1Column;
+  private final int arg2Column;
+  private final int arg3Column;
+
+  public <ClassName>(int arg1Column, int arg2Column, int arg3Column,
+  int outputColumnNum) {
+super(outputColumnNum);
+this.arg1Column = arg1Column;
+this.arg2Column = arg2Column;
+this.arg3Column = arg3Column;
+  }
+
+  public <ClassName>() {
+super();
+
+// Dummy final assignments.
+arg1Column = -1;
+arg2Column = -1;
+arg3Column = -1;
+  }
+
+  @Override
+  public void evaluate(VectorizedRowBatch batch) throws HiveException {
+
+if (childExpressions != null) {
+  super.evaluateChildren(batch);
+}
+
+LongColumnVector arg1ColVector = (LongColumnVector) batch.cols[arg1Column];
+<ColumnVectorType> arg2ColVector = (<ColumnVectorType>) batch.cols[arg2Column];
+boolean[] arg2IsNull = arg2ColVector.isNull;
+<ColumnVectorType> arg3ColVector = (<ColumnVectorType>) batch.cols[arg3Column];
+boolean[] arg3IsNull = 

[1/4] hive git commit: HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly (Matt McCline, reviewed by Teddy Choi)

2018-05-14 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/master f3276241b -> f3beace2e


http://git-wip-us.apache.org/repos/asf/hive/blob/f3beace2/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out 
b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
index 244aca6..01e915b 100644
--- a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
@@ -1,15 +1,19 @@
-PREHOOK: query: CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, 
stimestamp1 string) STORED AS ORC
+PREHOOK: query: CREATE TABLE alltypesorc_string(cboolean1 boolean, ctimestamp1 
timestamp, stimestamp1 string,
+ctimestamp2 timestamp) STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@alltypesorc_string
-POSTHOOK: query: CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, 
stimestamp1 string) STORED AS ORC
+POSTHOOK: query: CREATE TABLE alltypesorc_string(cboolean1 boolean, 
ctimestamp1 timestamp, stimestamp1 string,
+ctimestamp2 timestamp) STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@alltypesorc_string
 PREHOOK: query: INSERT OVERWRITE TABLE alltypesorc_string
 SELECT
+  cboolean1,
   to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS toutc,
-  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst
+  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst,
+  ctimestamp2
 FROM alltypesorc
 ORDER BY toutc, cst
 LIMIT 40
@@ -18,16 +22,146 @@ PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@alltypesorc_string
 POSTHOOK: query: INSERT OVERWRITE TABLE alltypesorc_string
 SELECT
+  cboolean1,
   to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS toutc,
-  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst
+  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst,
+  ctimestamp2
 FROM alltypesorc
 ORDER BY toutc, cst
 LIMIT 40
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, 
comment:null), ]
 POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 EXPRESSION 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, 
comment:null), ]
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, 
comment:null), ]
 POSTHOOK: Lineage: alltypesorc_string.stimestamp1 EXPRESSION 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, 
comment:null), ]
+PREHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', '1978-08-05 14:41:05.501', '1999-10-03 
16:59:10.396903939')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@alltypesorc_string
+POSTHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', '1978-08-05 14:41:05.501', '1999-10-03 
16:59:10.396903939')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.stimestamp1 SCRIPT []
+PREHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, null, 
'2013-04-10 00:43:46.8547315', null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@alltypesorc_string
+POSTHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, null, 
'2013-04-10 00:43:46.8547315', null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 EXPRESSION []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 EXPRESSION []
+POSTHOOK: Lineage: alltypesorc_string.stimestamp1 SCRIPT []
+PREHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', null, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@alltypesorc_string
+POSTHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', null, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 SCRIPT []
+POSTHOOK: Lineage: 

[3/4] hive git commit: HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly (Matt McCline, reviewed by Teddy Choi)

2018-05-14 Thread mmccline
http://git-wip-us.apache.org/repos/asf/hive/blob/f3beace2/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java
 
b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java
new file mode 100644
index 000..c52ca19
--- /dev/null
+++ 
b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java
@@ -0,0 +1,444 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.expressions;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.hive.common.type.DataTypePhysicalVariation;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
+import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory;
+import org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorRandomBatchSource;
+import org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIf;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFWhen;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.LongWritable;
+
+import junit.framework.Assert;
+
+import org.junit.Test;
+
+public class TestVectorIfStatement {
+
+  @Test
+  public void testBoolean() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "boolean");
+  }
+
+  @Test
+  public void testInt() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "int");
+  }
+
+  @Test
+  public void testBigInt() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "bigint");
+  }
+
+  @Test
+  public void testString() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "string");
+  }
+
+  @Test
+  public void testTimestamp() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "timestamp");
+  }
+
+  @Test
+  public void testDate() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "date");
+  }
+
+  @Test
+  public void testIntervalDayTime() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "interval_day_time");
+  }
+
+  @Test
+  public void testIntervalYearMonth() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "interval_year_month");
+  }
+
+  @Test
+  public void testDouble() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "double");
+  }
+
+  @Test
+  public void testChar() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "char(10)");
+  }
+
+  @Test
+  public void testVarchar() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "varchar(15)");
+  }
+
+  @Test
+  public void testBinary() throws Exception {
+Random random = new Random(12882);
+
+doIfTests(random, "binary");
+  }
+
+  @Test
+  public void 

[2/4] hive git commit: HIVE-19384: Vectorization: IfExprTimestamp* do not handle NULLs correctly (Matt McCline, reviewed by Teddy Choi)

2018-05-14 Thread mmccline
http://git-wip-us.apache.org/repos/asf/hive/blob/f3beace2/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out 
b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
index 68b89a7..fe5fd23 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
@@ -1,15 +1,19 @@
-PREHOOK: query: CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, 
stimestamp1 string) STORED AS ORC
+PREHOOK: query: CREATE TABLE alltypesorc_string(cboolean1 boolean, ctimestamp1 
timestamp, stimestamp1 string,
+ctimestamp2 timestamp) STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@alltypesorc_string
-POSTHOOK: query: CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, 
stimestamp1 string) STORED AS ORC
+POSTHOOK: query: CREATE TABLE alltypesorc_string(cboolean1 boolean, 
ctimestamp1 timestamp, stimestamp1 string,
+ctimestamp2 timestamp) STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@alltypesorc_string
 PREHOOK: query: INSERT OVERWRITE TABLE alltypesorc_string
 SELECT
+  cboolean1,
   to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS toutc,
-  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst
+  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst,
+  ctimestamp2
 FROM alltypesorc
 ORDER BY toutc, cst
 LIMIT 40
@@ -18,16 +22,146 @@ PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@alltypesorc_string
 POSTHOOK: query: INSERT OVERWRITE TABLE alltypesorc_string
 SELECT
+  cboolean1,
   to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS toutc,
-  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst
+  CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING) as cst,
+  ctimestamp2
 FROM alltypesorc
 ORDER BY toutc, cst
 LIMIT 40
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, 
comment:null), ]
 POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 EXPRESSION 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, 
comment:null), ]
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, 
comment:null), ]
 POSTHOOK: Lineage: alltypesorc_string.stimestamp1 EXPRESSION 
[(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, 
comment:null), ]
+PREHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', '1978-08-05 14:41:05.501', '1999-10-03 
16:59:10.396903939')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@alltypesorc_string
+POSTHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', '1978-08-05 14:41:05.501', '1999-10-03 
16:59:10.396903939')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.stimestamp1 SCRIPT []
+PREHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, null, 
'2013-04-10 00:43:46.8547315', null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@alltypesorc_string
+POSTHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, null, 
'2013-04-10 00:43:46.8547315', null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 EXPRESSION []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 EXPRESSION []
+POSTHOOK: Lineage: alltypesorc_string.stimestamp1 SCRIPT []
+PREHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', null, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@alltypesorc_string
+POSTHOOK: query: INSERT INTO TABLE alltypesorc_string values (false, 
'2021-09-24 03:18:32.4', null, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@alltypesorc_string
+POSTHOOK: Lineage: alltypesorc_string.cboolean1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp1 SCRIPT []
+POSTHOOK: Lineage: alltypesorc_string.ctimestamp2 EXPRESSION []
+POSTHOOK: