HIVE-19474: Decimal type should be cast as part of the CTAS or INSERT clause. (Slim Bouguerra via Jesus Camacho Rodriguez)

Signed-off-by: Ashutosh Chauhan <hashut...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/71d211d2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/71d211d2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/71d211d2

Branch: refs/heads/branch-3
Commit: 71d211d2dbf53031da27aec562b4fba48939841d
Parents: 1db0521
Author: Slim Bouguerra <slim.bougue...@gmail.com>
Authored: Mon May 14 09:34:14 2018 -0700
Committer: Vineet Garg <vg...@apache.org>
Committed: Mon May 14 11:13:14 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  3 -
 .../hive/druid/DruidStorageHandlerUtils.java    | 15 ++---
 .../hadoop/hive/druid/serde/DruidSerDe.java     | 60 ++++++++------------
 3 files changed, 28 insertions(+), 50 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/71d211d2/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 3bb1e80..e56c14f 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2682,9 +2682,6 @@ public class HiveConf extends Configuration {
             "Wait time in ms default to 30 seconds."
     ),
     HIVE_DRUID_BITMAP_FACTORY_TYPE("hive.druid.bitmap.type", "roaring", new PatternSet("roaring", "concise"), "Coding algorithm use to encode the bitmaps"),
-    HIVE_DRUID_APPROX_RESULT("hive.druid.approx.result", false,
-        "Whether to allow approximate results from druid. \n" +
-        "When set to true decimals will be stored as double and druid is 
allowed to return approximate results for decimal columns."),
     // For HBase storage handler
     HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true,
         "Whether writes to HBase should be forced to the write-ahead log. \n" +

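With hive.druid.approx.result removed, a DECIMAL column is no longer stored
implicitly as a double; the cast now has to appear in the query itself. A
minimal HiveQL sketch of the pattern (table and column names here are
hypothetical, only the storage handler class is real):

    -- Sketch: CTAS into Druid with the decimal column cast by hand.
    CREATE TABLE druid_sales
    STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
    AS
    SELECT
      `__time`,                          -- Druid timestamp column
      store_id,                          -- becomes a dimension
      CAST(amount AS DOUBLE) AS amount   -- DECIMAL must be cast explicitly
    FROM sales_staging;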
http://git-wip-us.apache.org/repos/asf/hive/blob/71d211d2/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index 93d3e5c..076f00a 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -817,8 +817,6 @@ public final class DruidStorageHandlerUtils {
     // Default, all columns that are not metrics or timestamp, are treated as dimensions
     final List<DimensionSchema> dimensions = new ArrayList<>();
     ImmutableList.Builder<AggregatorFactory> aggregatorFactoryBuilder = ImmutableList.builder();
-    final boolean approximationAllowed = HiveConf
-        .getBoolVar(jc, HiveConf.ConfVars.HIVE_DRUID_APPROX_RESULT);
     for (int i = 0; i < columnTypes.size(); i++) {
       final PrimitiveObjectInspector.PrimitiveCategory primitiveCategory = ((PrimitiveTypeInfo) columnTypes
           .get(i)).getPrimitiveCategory();
@@ -835,15 +833,10 @@ public final class DruidStorageHandlerUtils {
         af = new DoubleSumAggregatorFactory(columnNames.get(i), columnNames.get(i));
         break;
       case DECIMAL:
-        if (approximationAllowed) {
-          af = new DoubleSumAggregatorFactory(columnNames.get(i), columnNames.get(i));
-        } else {
-          throw new UnsupportedOperationException(
-              String.format("Druid does not support decimal column type." +
-                      "Either cast column [%s] to double or Enable Approximate Result for Druid by setting property [%s] to true",
-                  columnNames.get(i), HiveConf.ConfVars.HIVE_DRUID_APPROX_RESULT.varname));
-        }
-        break;
+        throw new UnsupportedOperationException(String.format(
+            "Druid does not support decimal column type. Cast column [%s] to double",
+            columnNames.get(i)));
+
       case TIMESTAMP:
         // Granularity column
         String tColumnName = columnNames.get(i);

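The INSERT path hits the same UnsupportedOperationException when a DECIMAL
column reaches the storage handler, so the cast belongs in the SELECT list
there as well; a sketch with the same hypothetical names:

    -- Sketch: inserting into an existing Druid-backed table.
    INSERT INTO TABLE druid_sales
    SELECT
      `__time`,
      store_id,
      CAST(amount AS DOUBLE) AS amount   -- without the cast this now throws
    FROM sales_staging;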
http://git-wip-us.apache.org/repos/asf/hive/blob/71d211d2/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
index d991adb..5f76579 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
@@ -17,26 +17,17 @@
  */
 package org.apache.hadoop.hive.druid.serde;
 
-import java.io.IOException;
-import java.io.InputStream;
-import java.sql.Timestamp;
-import java.time.Instant;
-import java.time.ZonedDateTime;
-import java.time.format.DateTimeFormatter;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.stream.Collectors;
-
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import io.druid.query.Druids;
+import io.druid.query.Druids.SegmentMetadataQueryBuilder;
+import io.druid.query.metadata.metadata.ColumnAnalysis;
+import io.druid.query.metadata.metadata.SegmentAnalysis;
+import io.druid.query.metadata.metadata.SegmentMetadataQuery;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.type.HiveChar;
-import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.common.type.TimestampTZ;
 import org.apache.hadoop.hive.conf.Constants;
@@ -53,7 +44,6 @@ import org.apache.hadoop.hive.serde2.SerDeStats;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
 import org.apache.hadoop.hive.serde2.io.HiveCharWritable;
-import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
 import org.apache.hadoop.hive.serde2.io.TimestampLocalTZWritable;
@@ -67,7 +57,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspect
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.FloatObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveCharObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveVarcharObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
@@ -92,15 +81,21 @@ import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.google.common.base.Function;
-import com.google.common.collect.Lists;
-
-import io.druid.query.Druids;
-import io.druid.query.Druids.SegmentMetadataQueryBuilder;
-import io.druid.query.metadata.metadata.ColumnAnalysis;
-import io.druid.query.metadata.metadata.SegmentAnalysis;
-import io.druid.query.metadata.metadata.SegmentMetadataQuery;
+import java.io.IOException;
+import java.io.InputStream;
+import java.sql.Timestamp;
+import java.time.Instant;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.stream.Collectors;
 
 /**
 * DruidSerDe that is used to deserialize objects from a Druid data source.
@@ -350,10 +345,6 @@ public class DruidSerDe extends AbstractSerDe {
           res = ((DoubleObjectInspector) fields.get(i).getFieldObjectInspector())
                   .get(values.get(i));
           break;
-        case DECIMAL:
-          res = ((HiveDecimalObjectInspector) fields.get(i).getFieldObjectInspector())
-                  .getPrimitiveJavaObject(values.get(i)).doubleValue();
-          break;
         case CHAR:
           res = ((HiveCharObjectInspector) fields.get(i).getFieldObjectInspector())
                   .getPrimitiveJavaObject(values.get(i)).getValue();
@@ -371,7 +362,7 @@ public class DruidSerDe extends AbstractSerDe {
                   .get(values.get(i));
           break;
         default:
-          throw new SerDeException("Unknown type: " + 
types[i].getPrimitiveCategory());
+          throw new SerDeException("Unsupported type: " + 
types[i].getPrimitiveCategory());
       }
       value.put(columns[i], res);
     }
@@ -452,9 +443,6 @@ public class DruidSerDe extends AbstractSerDe {
         case DOUBLE:
           output.add(new DoubleWritable(((Number) value).doubleValue()));
           break;
-        case DECIMAL:
-          output.add(new HiveDecimalWritable(HiveDecimal.create(((Number) value).doubleValue())));
-          break;
         case CHAR:
           output.add(
               new HiveCharWritable(

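On the read side, DruidSerDe no longer produces HiveDecimalWritable, so
metric columns surface as DOUBLE; if decimal semantics are wanted at query
time, the value can be cast back (hypothetical names again):

    -- Sketch: restoring decimal semantics when reading from Druid.
    SELECT store_id, CAST(amount AS DECIMAL(10, 2)) AS amount
    FROM druid_sales;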