This is an automated email from the ASF dual-hosted git repository.

jcamacho pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 94093e8  HIVE-24154: Missing simplification opportunity with IN and EQUALS clauses (Jesus Camacho Rodriguez, reviewed by Zoltan Haindrich)
94093e8 is described below

commit 94093e86e96786e36756fef802938cd65ef4fd0e
Author: Jesús Camacho Rodríguez <jcama...@apache.org>
AuthorDate: Wed Sep 30 13:07:31 2020 -0700

    HIVE-24154: Missing simplification opportunity with IN and EQUALS clauses (Jesus Camacho Rodriguez, reviewed by Zoltan Haindrich)
    
    Closes apache/hive#1492
---
 .../rules/HivePointLookupOptimizerRule.java        | 398 +++++++++++----------
 .../hive/ql/plan/mapping/TestCounterMapping.java   |   4 +-
 .../test/queries/clientpositive/multi_in_clause.q  |  15 +-
 .../clientpositive/llap/bucketpruning1.q.out       |  18 +-
 .../llap/dynamic_partition_skip_default.q.out      |   4 +-
 .../llap/external_jdbc_table_perf.q.out            |  12 +-
 .../llap/fold_eq_with_case_when.q.out              |   4 +-
 .../clientpositive/llap/join_filters_overlap.q.out |  24 +-
 .../results/clientpositive/llap/masking_mv.q.out   |  16 +-
 .../llap/materialized_view_rewrite_ssb.q.out       |  40 +--
 .../llap/materialized_view_rewrite_ssb_2.q.out     |  40 +--
 ...terialized_view_rewrite_ssb_grouping_sets.q.out |  22 +-
 ...rialized_view_rewrite_ssb_grouping_sets_2.q.out |   2 +-
 .../clientpositive/llap/multi_in_clause.q.out      |  69 +++-
 ql/src/test/results/clientpositive/llap/pcr.q.out  |   4 +-
 .../clientpositive/llap/ppd_union_view.q.out       |  26 +-
 .../llap/rand_partitionpruner2.q.out               |   2 +-
 .../llap/rand_partitionpruner3.q.out               |   2 +-
 .../test/results/clientpositive/llap/union22.q.out |   2 +-
 .../clientpositive/llap/unionDistinct_1.q.out      |   2 +-
 .../clientpositive/llap/vector_interval_2.q.out    |  36 +-
 .../clientpositive/perf/tez/cbo_query74.q.out      |   8 +-
 .../perf/tez/constraints/cbo_query74.q.out         |   8 +-
 .../perf/tez/constraints/query74.q.out             |   8 +-
 .../results/clientpositive/perf/tez/query74.q.out  |   6 +-
 25 files changed, 438 insertions(+), 334 deletions(-)
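
For illustration, a minimal HiveQL sketch of the newly covered rewrite; the table and column names here are made up, while the before/after predicate strings mirror the updated TestCounterMapping expectation in the patch below:

    -- Given some table t with an integer column u, a conjunction of an IN
    -- point lookup and an EQUALS on the same expression, such as
    SELECT * FROM t WHERE u IN (1, 2, 3) AND u = 2;
    -- used to keep both conjuncts in the plan:
    --   predicate: ((u = 2) and (u) IN (1, 2, 3)) (type: boolean)
    -- and is now simplified by HivePointLookupOptimizerRule to:
    --   predicate: (u = 2) (type: boolean)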

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
index 7332587..57e2ffe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
@@ -47,6 +47,7 @@ import org.apache.calcite.rex.RexShuttle;
 import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveBetween;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIn;
@@ -54,11 +55,8 @@ import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Function;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.LinkedHashMultimap;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import com.google.common.collect.Multimap;
 import com.google.common.collect.Multimaps;
 import com.google.common.collect.Sets;
@@ -72,7 +70,7 @@ import com.google.common.collect.Sets;
  * </pre>
  * If c is struct; then c=v1 is a group of anded equations.
  *
- * Similarily
+ * Similarly
  * <pre>
  * v1 &lt;= c1 and c1 &lt;= v2
  * </pre>
@@ -159,9 +157,8 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
         return;
       }
       Project newProject = project.copy(project.getTraitSet(), project.getInput(), newProjects,
-          project.getRowType(), project.getFlags());
+          project.getRowType());
       call.transformTo(newProject);
-
     }
 
   }
@@ -187,7 +184,7 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
     newCondition = mergeInClause.apply(newCondition);
 
     // 3. Close BETWEEN expressions if possible
-    RexTranformIntoBetween t = new RexTranformIntoBetween(rexBuilder);
+    RexTransformIntoBetween t = new RexTransformIntoBetween(rexBuilder);
     newCondition = t.apply(newCondition);
     return newCondition;
   }
@@ -196,7 +193,7 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
    * Transforms inequality candidates into [NOT] BETWEEN calls.
    *
    */
-  protected static class RexTranformIntoBetween extends RexShuttle {
+  protected static class RexTransformIntoBetween extends RexShuttle {
     private final RexBuilder rexBuilder;
 
     static class DiGraph<V, E> {
@@ -248,7 +245,7 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
       public void putEdgeValue(V s, V t, E e) {
         Node<V, E> nodeS = nodeOf(s);
         Node<V, E> nodeT = nodeOf(t);
-        Edge<V, E> edge = new Edge<V, E>(nodeS, nodeT, e);
+        Edge<V, E> edge = new Edge<>(nodeS, nodeT, e);
         nodeS.addEdge(edge);
         nodeT.addEdge(edge);
       }
@@ -256,7 +253,7 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
       private Node<V, E> nodeOf(V s) {
         Node<V, E> node = nodes.get(s);
         if (node == null) {
-          nodes.put(s, node = new Node<V, E>(s));
+          nodes.put(s, node = new Node<>(s));
         }
         return node;
       }
@@ -302,7 +299,7 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
 
     }
 
-    RexTranformIntoBetween(RexBuilder rexBuilder) {
+    RexTransformIntoBetween(RexBuilder rexBuilder) {
       this.rexBuilder = rexBuilder;
     }
 
@@ -313,9 +310,9 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
         RexCall call = (RexCall) node;
         switch (call.getKind()) {
         case AND:
-          return processComparisions(call, SqlKind.LESS_THAN_OR_EQUAL, false);
+          return processComparisons(call, SqlKind.LESS_THAN_OR_EQUAL, false);
         case OR:
-          return processComparisions(call, SqlKind.GREATER_THAN, true);
+          return processComparisons(call, SqlKind.GREATER_THAN, true);
         default:
           break;
         }
@@ -339,9 +336,9 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
       }
     }
 
-    private RexNode processComparisions(RexCall call, SqlKind forwardEdge, boolean invert) {
+    private RexNode processComparisons(RexCall call, SqlKind forwardEdge, boolean invert) {
       DiGraph<RexNodeRef, RexCall> g =
-          buildComparisionGraph(call.getOperands(), forwardEdge);
+          buildComparisonGraph(call.getOperands(), forwardEdge);
       Map<RexNode, BetweenCandidate> replacedNodes = new IdentityHashMap<>();
       for (RexNodeRef n : g.nodes()) {
         Set<RexNodeRef> pred = g.predecessors(n);
@@ -387,11 +384,11 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
     }
 
     /**
-     * Builds a graph of the given comparision type.
+     * Builds a graph of the given comparison type.
      *
-     * The graph edges are annotated with the RexNodes representing the comparision.
+     * The graph edges are annotated with the RexNodes representing the comparison.
      */
-    private DiGraph<RexNodeRef, RexCall> buildComparisionGraph(List<RexNode> operands, SqlKind cmpForward) {
+    private DiGraph<RexNodeRef, RexCall> buildComparisonGraph(List<RexNode> operands, SqlKind cmpForward) {
       DiGraph<RexNodeRef, RexCall> g = new DiGraph<>();
       for (RexNode node : operands) {
         if(!(node instanceof RexCall) ) {
@@ -420,8 +417,8 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
   static class RexNodeRef {
 
     public static Comparator<RexNodeRef> COMPARATOR =
-        (RexNodeRef o1, RexNodeRef o2) -> o1.node.toString().compareTo(o2.node.toString());
-    private RexNode node;
+        Comparator.comparing((RexNodeRef o) -> o.node.toString());
+    private final RexNode node;
 
     public RexNodeRef(RexNode node) {
       this.node = node;
@@ -452,6 +449,64 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
   }
 
   /**
+   * Represents a constraint.
+   *
+   * Example: a=1
+   * substr(a,1,2) = concat('asd','xxx')
+   */
+  static class Constraint {
+
+    private final RexNode exprNode;
+    private final RexNode constNode;
+
+    public Constraint(RexNode exprNode, RexNode constNode) {
+      this.exprNode = exprNode;
+      this.constNode = constNode;
+    }
+
+    /**
+     * Interprets argument as a constraint; if not possible returns null.
+     */
+    public static Constraint of(RexNode n) {
+      if (!(n instanceof RexCall)) {
+        return null;
+      }
+      RexCall call = (RexCall) n;
+      if (call.getOperator().getKind() != SqlKind.EQUALS) {
+        return null;
+      }
+      RexNode opA = call.operands.get(0);
+      RexNode opB = call.operands.get(1);
+      if (RexUtil.isNull(opA) || RexUtil.isNull(opB)) {
+        // dont try to compare nulls
+        return null;
+      }
+      if (isConstExpr(opA) && isColumnExpr(opB)) {
+        return new Constraint(opB, opA);
+      }
+      if (isColumnExpr(opA) && isConstExpr(opB)) {
+        return new Constraint(opA, opB);
+      }
+      return null;
+    }
+
+    private static boolean isColumnExpr(RexNode node) {
+      return !node.getType().isStruct() && HiveCalciteUtil.getInputRefs(node).size() > 0
+          && HiveCalciteUtil.isDeterministic(node);
+    }
+
+    private static boolean isConstExpr(RexNode node) {
+      return !node.getType().isStruct() && HiveCalciteUtil.getInputRefs(node).size() == 0
+          && HiveCalciteUtil.isDeterministic(node);
+    }
+
+    public RexNodeRef getKey() {
+      return new RexNodeRef(exprNode);
+    }
+
+  }
+
+  /**
    * Transforms OR clauses into IN clauses, when possible.
    */
   protected static class RexTransformIntoInClause extends RexShuttle {
@@ -468,8 +523,7 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
       RexNode node = super.visitCall(inputCall);
       if (node instanceof RexCall) {
         RexCall call = (RexCall) node;
-        switch (call.getKind()) {
-        case OR:
+        if (call.getKind() == SqlKind.OR) {
           try {
             RexNode newNode = transformIntoInClauseCondition(rexBuilder,
                 call, minNumORClauses);
@@ -480,72 +534,12 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
             LOG.error("Exception in HivePointLookupOptimizerRule", e);
             return call;
           }
-        default:
-          break;
         }
       }
       return node;
     }
 
     /**
-     * Represents a contraint.
-     *
-     * Example: a=1
-     * substr(a,1,2) = concat('asd','xxx')
-     */
-    static class Constraint {
-
-      private RexNode exprNode;
-      private RexNode constNode;
-
-      public Constraint(RexNode exprNode, RexNode constNode) {
-        this.exprNode = constNode;
-        this.constNode = exprNode;
-      }
-
-      /**
-       * Interprets argument as a constraint; if not possible returns null.
-       */
-      public static Constraint of(RexNode n) {
-        if (!(n instanceof RexCall)) {
-          return null;
-        }
-        RexCall call = (RexCall) n;
-        if (call.getOperator().getKind() != SqlKind.EQUALS) {
-          return null;
-        }
-        RexNode opA = call.operands.get(0);
-        RexNode opB = call.operands.get(1);
-        if (RexUtil.isNull(opA) || RexUtil.isNull(opB)) {
-          // dont try to compare nulls
-          return null;
-        }
-        if (isConstExpr(opA) && isColumnExpr(opB)) {
-          return new Constraint(opB, opA);
-        }
-        if (isColumnExpr(opA) && isConstExpr(opB)) {
-          return new Constraint(opA, opB);
-        }
-        return null;
-      }
-
-      private static boolean isColumnExpr(RexNode node) {
-        return !node.getType().isStruct() && HiveCalciteUtil.getInputRefs(node).size() > 0
-            && HiveCalciteUtil.isDeterministic(node);
-      }
-
-      private static boolean isConstExpr(RexNode node) {
-        return !node.getType().isStruct() && HiveCalciteUtil.getInputRefs(node).size() == 0
-            && HiveCalciteUtil.isDeterministic(node);
-      }
-
-      public RexNodeRef getKey() {
-        return new RexNodeRef(constNode);
-      }
-
-    }
-
-    /**
      * A group of Constraints.
      *
      * Examples:
@@ -557,16 +551,8 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
      *
      */
     static class ConstraintGroup {
-      public static final Function<ConstraintGroup, Set<RexNodeRef>> KEY_FUNCTION =
-          new Function<ConstraintGroup, Set<RexNodeRef>>() {
-
-            @Override
-            public Set<RexNodeRef> apply(ConstraintGroup cg) {
-              return cg.key;
-            }
-          };
-      private Map<RexNodeRef, Constraint> constraints = new HashMap<>();
-      private RexNode originalRexNode;
+      private final Map<RexNodeRef, Constraint> constraints = new HashMap<>();
+      private final RexNode originalRexNode;
       private final Set<RexNodeRef> key;
 
       public ConstraintGroup(RexNode rexNode) {
@@ -599,7 +585,7 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
           if (constraint == null) {
             throw new SemanticException("Unable to find constraint which was 
earlier added.");
           }
-          ret.add(constraint.exprNode);
+          ret.add(constraint.constNode);
         }
         return ret;
       }
@@ -622,7 +608,7 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
       }
 
       Multimap<Set<RexNodeRef>, ConstraintGroup> assignmentGroups =
-          Multimaps.index(allNodes, ConstraintGroup.KEY_FUNCTION);
+          Multimaps.index(allNodes, cg -> cg.key);
 
       for (Entry<Set<RexNodeRef>, Collection<ConstraintGroup>> sa : assignmentGroups.asMap().entrySet()) {
         // skip opaque
@@ -656,12 +642,11 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
 
     private RexNode buildInFor(Set<RexNodeRef> set, Collection<ConstraintGroup> value) throws SemanticException {
 
-      List<RexNodeRef> columns = new ArrayList<>();
-      columns.addAll(set);
+      List<RexNodeRef> columns = new ArrayList<>(set);
       columns.sort(RexNodeRef.COMPARATOR);
       List<RexNode >operands = new ArrayList<>();
 
-      List<RexNode> columnNodes = columns.stream().map(n -> n.getRexNode()).collect(Collectors.toList());
+      List<RexNode> columnNodes = columns.stream().map(RexNodeRef::getRexNode).collect(Collectors.toList());
       operands.add(useStructIfNeeded(columnNodes));
       for (ConstraintGroup node : value) {
         List<RexNode> values = node.getValuesInOrder(columns);
@@ -693,102 +678,153 @@ public abstract class HivePointLookupOptimizerRule extends RelOptRule {
     }
 
     @Override public RexNode visitCall(RexCall call) {
-      RexNode node;
-      final List<RexNode> operands;
-      final List<RexNode> newOperands;
-      Map<String,RexNode> stringToExpr = Maps.newHashMap();
-      Multimap<String,String> inLHSExprToRHSExprs = LinkedHashMultimap.create();
       switch (call.getKind()) {
         case AND:
-          // IN clauses need to be combined by keeping only common elements
-          operands = Lists.newArrayList(RexUtil.flattenAnd(call.getOperands()));
-          for (int i = 0; i < operands.size(); i++) {
-            RexNode operand = operands.get(i);
-            if (operand.getKind() == SqlKind.IN) {
-              RexCall inCall = (RexCall) operand;
-              if (!HiveCalciteUtil.isDeterministic(inCall.getOperands().get(0))) {
-                continue;
-              }
-              String ref = inCall.getOperands().get(0).toString();
-              stringToExpr.put(ref, inCall.getOperands().get(0));
-              if (inLHSExprToRHSExprs.containsKey(ref)) {
-                Set<String> expressions = Sets.newHashSet();
-                for (int j = 1; j < inCall.getOperands().size(); j++) {
-                  String expr = inCall.getOperands().get(j).toString();
-                  expressions.add(expr);
-                  stringToExpr.put(expr, inCall.getOperands().get(j));
-                }
-                inLHSExprToRHSExprs.get(ref).retainAll(expressions);
-                if (!inLHSExprToRHSExprs.containsKey(ref)) {
-                  // Note that Multimap does not keep a key if all its values are removed.
-                  // Hence, since there are no common expressions and it is within an AND,
-                  // we should return false
-                  return rexBuilder.makeLiteral(false);
-                }
-              } else {
-                for (int j = 1; j < inCall.getOperands().size(); j++) {
-                  String expr = inCall.getOperands().get(j).toString();
-                  inLHSExprToRHSExprs.put(ref, expr);
-                  stringToExpr.put(expr, inCall.getOperands().get(j));
-                }
-              }
-              operands.remove(i);
-              --i;
-            }
-          }
-          // Create IN clauses
-          newOperands = createInClauses(rexBuilder, stringToExpr, inLHSExprToRHSExprs);
-          newOperands.addAll(operands);
-          // Return node
-          node = RexUtil.composeConjunction(rexBuilder, newOperands, false);
-          break;
+          return handleAND(rexBuilder, call);
         case OR:
-          // IN clauses need to be combined by keeping all elements
-          operands = Lists.newArrayList(RexUtil.flattenOr(call.getOperands()));
-          for (int i = 0; i < operands.size(); i++) {
-            RexNode operand = operands.get(i);
-            if (operand.getKind() == SqlKind.IN) {
-              RexCall inCall = (RexCall) operand;
-              if (!HiveCalciteUtil.isDeterministic(inCall.getOperands().get(0))) {
-                continue;
+          return handleOR(rexBuilder, call);
+        default:
+          return super.visitCall(call);
+      }
+    }
+
+    private static RexNode handleAND(RexBuilder rexBuilder, RexCall call) {
+      // Visited nodes
+      final Set<RexNode> visitedRefs = new LinkedHashSet<>();
+      // IN clauses need to be combined by keeping only common elements
+      final Multimap<RexNode,RexNode> inLHSExprToRHSExprs = LinkedHashMultimap.create();
+      // We will use this set to keep those expressions that may evaluate
+      // into a null value.
+      final Multimap<RexNode,RexNode> inLHSExprToRHSNullableExprs = LinkedHashMultimap.create();
+      final List<RexNode> operands = new ArrayList<>(RexUtil.flattenAnd(call.getOperands()));
+      for (int i = 0; i < operands.size(); i++) {
+        RexNode operand = operands.get(i);
+        if (operand.getKind() == SqlKind.IN) {
+          RexCall inCall = (RexCall) operand;
+          if (!HiveCalciteUtil.isDeterministic(inCall.getOperands().get(0))) {
+            continue;
+          }
+          RexNode ref = inCall.getOperands().get(0);
+          visitedRefs.add(ref);
+          if (ref.getType().isNullable()) {
+            inLHSExprToRHSNullableExprs.put(ref, ref);
+          }
+          if (inLHSExprToRHSExprs.containsKey(ref)) {
+            Set<RexNode> expressions = Sets.newHashSet();
+            for (int j = 1; j < inCall.getOperands().size(); j++) {
+              RexNode constNode = inCall.getOperands().get(j);
+              expressions.add(constNode);
+              if (constNode.getType().isNullable()) {
+                inLHSExprToRHSNullableExprs.put(ref, constNode);
               }
-              String ref = inCall.getOperands().get(0).toString();
-              stringToExpr.put(ref, inCall.getOperands().get(0));
-              for (int j = 1; j < inCall.getOperands().size(); j++) {
-                String expr = inCall.getOperands().get(j).toString();
-                inLHSExprToRHSExprs.put(ref, expr);
-                stringToExpr.put(expr, inCall.getOperands().get(j));
+            }
+            inLHSExprToRHSExprs.get(ref).retainAll(expressions);
+          } else {
+            for (int j = 1; j < inCall.getOperands().size(); j++) {
+              RexNode constNode = inCall.getOperands().get(j);
+              inLHSExprToRHSExprs.put(ref, constNode);
+              if (constNode.getType().isNullable()) {
+                inLHSExprToRHSNullableExprs.put(ref, constNode);
               }
-              operands.remove(i);
-              --i;
             }
           }
-          // Create IN clauses
-          newOperands = createInClauses(rexBuilder, stringToExpr, inLHSExprToRHSExprs);
-          newOperands.addAll(operands);
-          // Return node
-          node = RexUtil.composeDisjunction(rexBuilder, newOperands, false);
-          break;
-        default:
-          return super.visitCall(call);
+          operands.remove(i);
+          --i;
+        } else if (operand.getKind() == SqlKind.EQUALS) {
+          Constraint c = Constraint.of(operand);
+          if (c == null || !HiveCalciteUtil.isDeterministic(c.exprNode)) {
+            continue;
+          }
+          visitedRefs.add(c.exprNode);
+          if (c.exprNode.getType().isNullable()) {
+            inLHSExprToRHSNullableExprs.put(c.exprNode, c.exprNode);
+          }
+          if (c.constNode.getType().isNullable()) {
+            inLHSExprToRHSNullableExprs.put(c.exprNode, c.constNode);
+          }
+          if (inLHSExprToRHSExprs.containsKey(c.exprNode)) {
+            inLHSExprToRHSExprs.get(c.exprNode).retainAll(Collections.singleton(c.constNode));
+          } else {
+            inLHSExprToRHSExprs.put(c.exprNode, c.constNode);
+          }
+          operands.remove(i);
+          --i;
+        }
       }
-      return node;
+      // Create IN clauses
+      final List<RexNode> newOperands = createInClauses(rexBuilder,
+          visitedRefs, inLHSExprToRHSExprs, inLHSExprToRHSNullableExprs);
+      newOperands.addAll(operands);
+      // Return node
+      return RexUtil.composeConjunction(rexBuilder, newOperands, false);
     }
 
-    private static List<RexNode> createInClauses(RexBuilder rexBuilder, Map<String, RexNode> stringToExpr,
-            Multimap<String, String> inLHSExprToRHSExprs) {
-      List<RexNode> newExpressions = Lists.newArrayList();
-      for (Entry<String,Collection<String>> entry : inLHSExprToRHSExprs.asMap().entrySet()) {
-        String ref = entry.getKey();
-        Collection<String> exprs = entry.getValue();
+    private static RexNode handleOR(RexBuilder rexBuilder, RexCall call) {
+      // IN clauses need to be combined by keeping all elements
+      final List<RexNode> operands = new ArrayList<>(RexUtil.flattenOr(call.getOperands()));
+      final Multimap<RexNode,RexNode> inLHSExprToRHSExprs = LinkedHashMultimap.create();
+      for (int i = 0; i < operands.size(); i++) {
+        RexNode operand = operands.get(i);
+        if (operand.getKind() == SqlKind.IN) {
+          RexCall inCall = (RexCall) operand;
+          if (!HiveCalciteUtil.isDeterministic(inCall.getOperands().get(0))) {
+            continue;
+          }
+          RexNode ref = inCall.getOperands().get(0);
+          for (int j = 1; j < inCall.getOperands().size(); j++) {
+            inLHSExprToRHSExprs.put(ref, inCall.getOperands().get(j));
+          }
+          operands.remove(i);
+          --i;
+        }
+      }
+      // Create IN clauses (fourth parameter is not needed since no expressions were removed)
+      final List<RexNode> newOperands = createInClauses(rexBuilder,
+          inLHSExprToRHSExprs.keySet(), inLHSExprToRHSExprs, null);
+      newOperands.addAll(operands);
+      // Return node
+      RexNode result = RexUtil.composeDisjunction(rexBuilder, newOperands, false);
+      if (!result.getType().equals(call.getType())) {
+        return rexBuilder.makeCast(call.getType(), result, true);
+      }
+      return result;
+    }
+
+    private static RexNode createResultFromEmptySet(RexBuilder rexBuilder,
+        RexNode ref, Multimap<RexNode, RexNode> inLHSExprToRHSNullableExprs) {
+      if (inLHSExprToRHSNullableExprs.containsKey(ref)) {
+        // We handle possible null values in the expressions.
+        List<RexNode> nullableExprs =
+            inLHSExprToRHSNullableExprs.get(ref)
+                .stream()
+                .map(n -> rexBuilder.makeCall(SqlStdOperatorTable.IS_NULL, ImmutableList.of(n)))
+                .collect(Collectors.toList());
+        return RexUtil.composeConjunction(rexBuilder,
+            ImmutableList.of(
+                RexUtil.composeDisjunction(rexBuilder, nullableExprs, false),
+                rexBuilder.makeNullLiteral(rexBuilder.getTypeFactory().createSqlType(SqlTypeName.BOOLEAN))),
+            false);
+      }
+      return rexBuilder.makeLiteral(false);
+    }
+
+    private static List<RexNode> createInClauses(RexBuilder rexBuilder, Set<RexNode> visitedRefs,
+        Multimap<RexNode, RexNode> inLHSExprToRHSExprs, Multimap<RexNode,RexNode> inLHSExprToRHSNullableExprs) {
+      final List<RexNode> newExpressions = new ArrayList<>();
+      for (RexNode ref : visitedRefs) {
+        Collection<RexNode> exprs = inLHSExprToRHSExprs.get(ref);
         if (exprs.isEmpty()) {
-          newExpressions.add(rexBuilder.makeLiteral(false));
+          // Note that Multimap does not keep a key if all its values are removed.
+          newExpressions.add(createResultFromEmptySet(rexBuilder, ref, inLHSExprToRHSNullableExprs));
+        } else if (exprs.size() == 1) {
+          List<RexNode> newOperands = new ArrayList<>(2);
+          newOperands.add(ref);
+          newOperands.add(exprs.iterator().next());
+          newExpressions.add(rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, newOperands));
         } else {
-          List<RexNode> newOperands = new ArrayList<RexNode>(exprs.size() + 1);
-          newOperands.add(stringToExpr.get(ref));
-          for (String expr : exprs) {
-            newOperands.add(stringToExpr.get(expr));
-          }
+          List<RexNode> newOperands = new ArrayList<>(exprs.size() + 1);
+          newOperands.add(ref);
+          newOperands.addAll(exprs);
           newExpressions.add(rexBuilder.makeCall(HiveIn.INSTANCE, newOperands));
         }
       }
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java
index f596324..3a50356 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java
@@ -183,9 +183,7 @@ public class TestCounterMapping {
     List<FilterOperator> fos = pm.getAll(FilterOperator.class);
     OpTreeSignature filterSig = pm.lookup(OpTreeSignature.class, fos.get(0));
     Object pred = filterSig.getSig().getSigMap().get("getPredicateString");
-    // TODO: Missed opportunity due to cast on constant preventing simplification.
-    // Fixed when we upgrade to Calcite 1.22.0
-    assertEquals("((u = 2) and (u) IN (1, 2, 3)) (type: boolean)", pred);
+    assertEquals("(u = 2) (type: boolean)", pred);
   }
 
   @Test
diff --git a/ql/src/test/queries/clientpositive/multi_in_clause.q b/ql/src/test/queries/clientpositive/multi_in_clause.q
index 9752063..a40674a 100644
--- a/ql/src/test/queries/clientpositive/multi_in_clause.q
+++ b/ql/src/test/queries/clientpositive/multi_in_clause.q
@@ -1,7 +1,18 @@
-create table very_simple_table_for_in_test (name STRING);
-insert into very_simple_table_for_in_test values ('a');
+create table very_simple_table_for_in_test (name STRING, othername STRING);
+insert into very_simple_table_for_in_test values ('a', null);
+insert into very_simple_table_for_in_test values (null, null);
 
 explain cbo
 select * from very_simple_table_for_in_test where name IN('g','r') AND name IN('a','b') ;
 
 select * from very_simple_table_for_in_test where name IN('g','r') AND name IN('a','b') ;
+
+explain cbo
+select name IN('g','r') AND name IN('a','b') from very_simple_table_for_in_test ;
+
+select name IN('g','r') AND name IN('a','b') from very_simple_table_for_in_test ;
+
+explain cbo
+select name IN('g','r') AND name IN('a','b') AND othername IN('x', 'y') from very_simple_table_for_in_test ;
+
+select name IN('g','r') AND name IN('a','b') AND othername IN('x', 'y') from very_simple_table_for_in_test ;
diff --git a/ql/src/test/results/clientpositive/llap/bucketpruning1.q.out b/ql/src/test/results/clientpositive/llap/bucketpruning1.q.out
index fb5c73a..8771727 100644
--- a/ql/src/test/results/clientpositive/llap/bucketpruning1.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucketpruning1.q.out
@@ -807,7 +807,7 @@ POSTHOOK: Input: default@srcbucket_pruned
 #### A masked pattern was here ####
 OPTIMIZED SQL: SELECT `key`, CAST('One' AS STRING) AS `value`, CAST('2008-04-08' AS STRING) AS `ds`
 FROM `default`.`srcbucket_pruned`
-WHERE `key` IN (2, 3) AND `value` = 'One' AND `ds` = '2008-04-08'
+WHERE `value` = 'One' AND `key` IN (2, 3) AND `ds` = '2008-04-08'
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -821,13 +821,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_pruned
-                  filterExpr: ((key) IN (2, 3) and (value = 'One') and (ds = '2008-04-08')) (type: boolean)
+                  filterExpr: ((value = 'One') and (key) IN (2, 3) and (ds = '2008-04-08')) (type: boolean)
                   buckets included: [4,6,] of 16
                   Statistics: Num rows: 1 Data size: 268 Basic stats: COMPLETE Column stats: COMPLETE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((key) IN (2, 3) and (value = 'One') and (ds = '2008-04-08')) (type: boolean)
+                    predicate: ((value = 'One') and (key) IN (2, 3) and (ds = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 268 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: int), 'One' (type: string), '2008-04-08' (type: string)
@@ -1304,7 +1304,7 @@ POSTHOOK: Input: default@srcbucket_pruned
 #### A masked pattern was here ####
 OPTIMIZED SQL: SELECT `key`, CAST('One' AS STRING) AS `value`, CAST('2008-04-08' AS STRING) AS `ds`
 FROM `default`.`srcbucket_pruned`
-WHERE `key` IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) AND `value` = 'One' AND `ds` = '2008-04-08'
+WHERE `value` = 'One' AND `key` IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) AND `ds` = '2008-04-08'
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1318,13 +1318,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_pruned
-                  filterExpr: ((key) IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) and (value = 'One') and (ds = '2008-04-08')) (type: boolean)
+                  filterExpr: ((value = 'One') and (key) IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) and (ds = '2008-04-08')) (type: boolean)
                   buckets included: [1,3,4,5,6,8,11,12,13,15,] of 16
                   Statistics: Num rows: 1 Data size: 268 Basic stats: COMPLETE Column stats: COMPLETE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((key) IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) and (value = 'One') and (ds = '2008-04-08')) (type: boolean)
+                    predicate: ((value = 'One') and (key) IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) and (ds = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 268 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: int), 'One' (type: string), '2008-04-08' (type: string)
@@ -1445,7 +1445,7 @@ POSTHOOK: Input: default@srcbucket_pruned
 #### A masked pattern was here ####
 OPTIMIZED SQL: SELECT CAST(1 AS INTEGER) AS `key`, `value`, CAST('2008-04-08' AS STRING) AS `ds`
 FROM `default`.`srcbucket_pruned`
-WHERE `value` IN ('One', 'Two') AND `key` = 1 AND `ds` = '2008-04-08'
+WHERE `key` = 1 AND `ds` = '2008-04-08' AND `value` IN ('One', 'Two')
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1459,13 +1459,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_pruned
-                  filterExpr: ((value) IN ('One', 'Two') and (key = 1) and (ds = '2008-04-08')) (type: boolean)
+                  filterExpr: ((key = 1) and (ds = '2008-04-08') and (value) IN ('One', 'Two')) (type: boolean)
                   buckets included: [13,] of 16
                   Statistics: Num rows: 1 Data size: 268 Basic stats: COMPLETE Column stats: COMPLETE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((value) IN ('One', 'Two') and (key = 1) and (ds = '2008-04-08')) (type: boolean)
+                    predicate: ((key = 1) and (ds = '2008-04-08') and (value) IN ('One', 'Two')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 268 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: 1 (type: int), value (type: string), '2008-04-08' (type: string)
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_skip_default.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_skip_default.q.out
index 578cb52..deefd01 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_partition_skip_default.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_skip_default.q.out
@@ -192,7 +192,7 @@ POSTHOOK: Input: default@dynamic_part_table@partcol1=1/partcol2=__HIVE_DEFAULT_P
 #### A masked pattern was here ####
 OPTIMIZED SQL: SELECT `intcol`
 FROM `default`.`dynamic_part_table`
-WHERE `partcol2` IN ('1', '__HIVE_DEFAULT_PARTITION__') AND `partcol1` = '1'
+WHERE `partcol1` = '1' AND `partcol2` IN ('1', '__HIVE_DEFAULT_PARTITION__')
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -274,7 +274,7 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: dynamic_part_table
-          filterExpr: ((partcol2) IN ('1', '__HIVE_DEFAULT_PARTITION__') and (partcol1 = '1')) (type: boolean)
+          filterExpr: ((partcol1 = '1') and (partcol2) IN ('1', '__HIVE_DEFAULT_PARTITION__')) (type: boolean)
           GatherStats: false
           Select Operator
             expressions: intcol (type: string)
diff --git a/ql/src/test/results/clientpositive/llap/external_jdbc_table_perf.q.out b/ql/src/test/results/clientpositive/llap/external_jdbc_table_perf.q.out
index 232f346..f5f6b80 100644
--- a/ql/src/test/results/clientpositive/llap/external_jdbc_table_perf.q.out
+++ b/ql/src/test/results/clientpositive/llap/external_jdbc_table_perf.q.out
@@ -2260,7 +2260,7 @@ FROM "STORE_SALES"
 WHERE "ss_customer_sk" IS NOT NULL AND "ss_sold_date_sk" IS NOT NULL) AS "t0"
 INNER JOIN (SELECT "d_date_sk"
 FROM "DATE_DIM"
-WHERE "d_moy" BETWEEN 1 AND 3 AND "d_year" = 1999 AND "d_date_sk" IS NOT NULL) 
AS "t2" ON "t0"."ss_sold_date_sk" = "t2"."d_date_sk"
+WHERE "d_year" = 1999 AND "d_moy" BETWEEN 1 AND 3 AND "d_date_sk" IS NOT NULL) 
AS "t2" ON "t0"."ss_sold_date_sk" = "t2"."d_date_sk"
                     hive.sql.query.fieldNames ss_customer_sk0
                     hive.sql.query.fieldTypes int
                     hive.sql.query.split false
@@ -2294,7 +2294,7 @@ FROM "WEB_SALES"
 WHERE "ws_bill_customer_sk" IS NOT NULL AND "ws_sold_date_sk" IS NOT NULL) AS 
"t0"
 INNER JOIN (SELECT "d_date_sk"
 FROM "DATE_DIM"
-WHERE "d_moy" BETWEEN 1 AND 3 AND "d_year" = 1999 AND "d_date_sk" IS NOT NULL) 
AS "t2" ON "t0"."ws_sold_date_sk" = "t2"."d_date_sk"
+WHERE "d_year" = 1999 AND "d_moy" BETWEEN 1 AND 3 AND "d_date_sk" IS NOT NULL) 
AS "t2" ON "t0"."ws_sold_date_sk" = "t2"."d_date_sk"
                     hive.sql.query.fieldNames literalTrue,ws_bill_customer_sk0
                     hive.sql.query.fieldTypes boolean,int
                     hive.sql.query.split false
@@ -2323,7 +2323,7 @@ FROM "CATALOG_SALES"
 WHERE "cs_ship_customer_sk" IS NOT NULL AND "cs_sold_date_sk" IS NOT NULL) AS 
"t0"
 INNER JOIN (SELECT "d_date_sk"
 FROM "DATE_DIM"
-WHERE "d_moy" BETWEEN 1 AND 3 AND "d_year" = 1999 AND "d_date_sk" IS NOT NULL) 
AS "t2" ON "t0"."cs_sold_date_sk" = "t2"."d_date_sk"
+WHERE "d_year" = 1999 AND "d_moy" BETWEEN 1 AND 3 AND "d_date_sk" IS NOT NULL) 
AS "t2" ON "t0"."cs_sold_date_sk" = "t2"."d_date_sk"
                     hive.sql.query.fieldNames literalTrue,cs_ship_customer_sk0
                     hive.sql.query.fieldTypes boolean,int
                     hive.sql.query.split false
@@ -6469,7 +6469,7 @@ FROM "STORE_SALES"
 WHERE "ss_customer_sk" IS NOT NULL AND "ss_sold_date_sk" IS NOT NULL) AS "t0"
 INNER JOIN (SELECT "d_date_sk"
 FROM "DATE_DIM"
-WHERE "d_moy" BETWEEN 1 AND 3 AND "d_year" = 1999 AND "d_date_sk" IS NOT NULL) 
AS "t2" ON "t0"."ss_sold_date_sk" = "t2"."d_date_sk"
+WHERE "d_year" = 1999 AND "d_moy" BETWEEN 1 AND 3 AND "d_date_sk" IS NOT NULL) 
AS "t2" ON "t0"."ss_sold_date_sk" = "t2"."d_date_sk"
                     hive.sql.query.fieldNames ss_customer_sk0
                     hive.sql.query.fieldTypes int
                     hive.sql.query.split false
@@ -6503,7 +6503,7 @@ FROM "WEB_SALES"
 WHERE "ws_bill_customer_sk" IS NOT NULL AND "ws_sold_date_sk" IS NOT NULL) AS 
"t0"
 INNER JOIN (SELECT "d_date_sk"
 FROM "DATE_DIM"
-WHERE "d_moy" BETWEEN 1 AND 3 AND "d_year" = 1999 AND "d_date_sk" IS NOT NULL) 
AS "t2" ON "t0"."ws_sold_date_sk" = "t2"."d_date_sk"
+WHERE "d_year" = 1999 AND "d_moy" BETWEEN 1 AND 3 AND "d_date_sk" IS NOT NULL) 
AS "t2" ON "t0"."ws_sold_date_sk" = "t2"."d_date_sk"
 GROUP BY "t0"."ws_bill_customer_sk"
                     hive.sql.query.fieldNames literalTrue,ws_bill_customer_sk0
                     hive.sql.query.fieldTypes boolean,int
@@ -6533,7 +6533,7 @@ FROM "CATALOG_SALES"
 WHERE "cs_ship_customer_sk" IS NOT NULL AND "cs_sold_date_sk" IS NOT NULL) AS 
"t0"
 INNER JOIN (SELECT "d_date_sk"
 FROM "DATE_DIM"
-WHERE "d_moy" BETWEEN 1 AND 3 AND "d_year" = 1999 AND "d_date_sk" IS NOT NULL) 
AS "t2" ON "t0"."cs_sold_date_sk" = "t2"."d_date_sk"
+WHERE "d_year" = 1999 AND "d_moy" BETWEEN 1 AND 3 AND "d_date_sk" IS NOT NULL) 
AS "t2" ON "t0"."cs_sold_date_sk" = "t2"."d_date_sk"
 GROUP BY "t0"."cs_ship_customer_sk"
                     hive.sql.query.fieldNames literalTrue,cs_ship_customer_sk0
                     hive.sql.query.fieldTypes boolean,int
diff --git a/ql/src/test/results/clientpositive/llap/fold_eq_with_case_when.q.out b/ql/src/test/results/clientpositive/llap/fold_eq_with_case_when.q.out
index e9dfff3..abbb7c7 100644
--- a/ql/src/test/results/clientpositive/llap/fold_eq_with_case_when.q.out
+++ b/ql/src/test/results/clientpositive/llap/fold_eq_with_case_when.q.out
@@ -50,10 +50,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: lineitem
-                  filterExpr: ((l_shipmode = 'RAIL') and (DATE'1996-03-30' = to_date(CAST( l_shipdate AS TIMESTAMP)))) (type: boolean)
+                  filterExpr: ((l_shipmode = 'RAIL') and (to_date(CAST( l_shipdate AS TIMESTAMP)) = DATE'1996-03-30')) (type: boolean)
                   Statistics: Num rows: 100 Data size: 19000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((l_shipmode = 'RAIL') and (DATE'1996-03-30' = to_date(CAST( l_shipdate AS TIMESTAMP)))) (type: boolean)
+                    predicate: ((l_shipmode = 'RAIL') and (to_date(CAST( l_shipdate AS TIMESTAMP)) = DATE'1996-03-30')) (type: boolean)
                     Statistics: Num rows: 7 Data size: 1330 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: l_orderkey (type: int), (UDFToDouble(l_partkey) / 1000000.0D) (type: double)
diff --git a/ql/src/test/results/clientpositive/llap/join_filters_overlap.q.out b/ql/src/test/results/clientpositive/llap/join_filters_overlap.q.out
index 1322dc2..291442e 100644
--- a/ql/src/test/results/clientpositive/llap/join_filters_overlap.q.out
+++ b/ql/src/test/results/clientpositive/llap/join_filters_overlap.q.out
@@ -23,10 +23,10 @@ FROM (SELECT `key`, `value`, `value` = 60 AS `=`, `value` = 50 AS `=3`
 FROM `default`.`a_n4`) AS `t`
 LEFT JOIN (SELECT `key`, CAST(50 AS INTEGER) AS `value`
 FROM `default`.`a_n4`
-WHERE `value` = 50 AND `key` IS NOT NULL) AS `t1` ON `t`.`key` = `t1`.`key` AND `t`.`=3`
+WHERE `value` = 50 AND `key` IS NOT NULL) AS `t1` ON `t`.`=3` AND `t`.`key` = `t1`.`key`
 LEFT JOIN (SELECT `key`, CAST(60 AS INTEGER) AS `value`
 FROM `default`.`a_n4`
-WHERE `value` = 60 AND `key` IS NOT NULL) AS `t3` ON `t`.`key` = `t3`.`key` AND `t`.`=`
+WHERE `value` = 60 AND `key` IS NOT NULL) AS `t3` ON `t`.`=` AND `t`.`key` = `t3`.`key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -301,10 +301,10 @@ FROM (SELECT `key`, CAST(50 AS INTEGER) AS `value`
 FROM `default`.`a_n4`
 WHERE `value` = 50 AND `key` IS NOT NULL) AS `t0`
 RIGHT JOIN (SELECT `key`, `value`, `value` = 60 AS `=`, `value` = 50 AS `=3`
-FROM `default`.`a_n4`) AS `t1` ON `t0`.`key` = `t1`.`key` AND `t1`.`=3`
+FROM `default`.`a_n4`) AS `t1` ON `t1`.`=3` AND `t0`.`key` = `t1`.`key`
 LEFT JOIN (SELECT `key`, CAST(60 AS INTEGER) AS `value`
 FROM `default`.`a_n4`
-WHERE `value` = 60 AND `key` IS NOT NULL) AS `t3` ON `t1`.`key` = `t3`.`key` AND `t1`.`=`
+WHERE `value` = 60 AND `key` IS NOT NULL) AS `t3` ON `t1`.`=` AND `t1`.`key` = `t3`.`key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -579,10 +579,10 @@ FROM (SELECT `key`, CAST(50 AS INTEGER) AS `value`
 FROM `default`.`a_n4`
 WHERE `value` = 50 AND `key` IS NOT NULL) AS `t0`
 RIGHT JOIN (SELECT `key`, `value`, `value` = 60 AS `=`, `value` = 50 AS `=3`
-FROM `default`.`a_n4`) AS `t1` ON `t0`.`key` = `t1`.`key` AND `t1`.`=3`
+FROM `default`.`a_n4`) AS `t1` ON `t1`.`=3` AND `t0`.`key` = `t1`.`key`
 LEFT JOIN (SELECT `key`, CAST(60 AS INTEGER) AS `value`
 FROM `default`.`a_n4`
-WHERE `value` = 60 AND `key` IS NOT NULL) AS `t3` ON `t1`.`key` = `t3`.`key` AND `t1`.`=`
+WHERE `value` = 60 AND `key` IS NOT NULL) AS `t3` ON `t1`.`=` AND `t1`.`key` = `t3`.`key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -856,13 +856,13 @@ OPTIMIZED SQL: SELECT `t`.`key`, `t`.`value`, `t0`.`key` AS `key1`, `t0`.`value`
 FROM (SELECT `key`, `value`, `value` = 40 AS `=`, `value` = 50 AS `=3`
 FROM `default`.`a_n4`) AS `t`
 FULL JOIN (SELECT `key`, `value`, `value` = 60 AS `=`, `value` = 50 AS `=3`
-FROM `default`.`a_n4`) AS `t0` ON `t`.`key` = `t0`.`key` AND `t`.`=3` AND `t0`.`=3`
+FROM `default`.`a_n4`) AS `t0` ON `t`.`=3` AND `t0`.`=3` AND `t`.`key` = `t0`.`key`
 LEFT JOIN (SELECT `key`, CAST(60 AS INTEGER) AS `value`
 FROM `default`.`a_n4`
-WHERE `value` = 60 AND `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key` AND `t0`.`=`
+WHERE `value` = 60 AND `key` IS NOT NULL) AS `t2` ON `t0`.`=` AND `t0`.`key` = `t2`.`key`
 LEFT JOIN (SELECT `key`, CAST(40 AS INTEGER) AS `value`
 FROM `default`.`a_n4`
-WHERE `value` = 40 AND `key` IS NOT NULL) AS `t4` ON `t`.`key` = `t4`.`key` AND `t`.`=`
+WHERE `value` = 40 AND `key` IS NOT NULL) AS `t4` ON `t`.`=` AND `t`.`key` = `t4`.`key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1173,13 +1173,13 @@ FROM (SELECT `key`, `value`, `value` = 40 AS `=`, `value` = 60 AS `=3`, `value`
 FROM `default`.`a_n4`) AS `t`
 LEFT JOIN (SELECT `key`, CAST(50 AS INTEGER) AS `value`
 FROM `default`.`a_n4`
-WHERE `value` = 50 AND `key` IS NOT NULL) AS `t1` ON `t`.`key` = `t1`.`key` AND `t`.`=4`
+WHERE `value` = 50 AND `key` IS NOT NULL) AS `t1` ON `t`.`=4` AND `t`.`key` = `t1`.`key`
 LEFT JOIN (SELECT `key`, CAST(60 AS INTEGER) AS `value`
 FROM `default`.`a_n4`
-WHERE `value` = 60 AND `key` IS NOT NULL) AS `t3` ON `t`.`key` = `t3`.`key` AND `t`.`=3`
+WHERE `value` = 60 AND `key` IS NOT NULL) AS `t3` ON `t`.`=3` AND `t`.`key` = `t3`.`key`
 LEFT JOIN (SELECT `key`, CAST(40 AS INTEGER) AS `value`
 FROM `default`.`a_n4`
-WHERE `value` = 40 AND `key` IS NOT NULL) AS `t5` ON `t`.`key` = `t5`.`key` AND `t`.`=`
+WHERE `value` = 40 AND `key` IS NOT NULL) AS `t5` ON `t`.`=` AND `t`.`key` = `t5`.`key`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
diff --git a/ql/src/test/results/clientpositive/llap/masking_mv.q.out b/ql/src/test/results/clientpositive/llap/masking_mv.q.out
index b0168dc..792ea95 100644
--- a/ql/src/test/results/clientpositive/llap/masking_mv.q.out
+++ b/ql/src/test/results/clientpositive/llap/masking_mv.q.out
@@ -198,9 +198,9 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: default.masking_test_view_n_mv
-          filterExpr: ((key < 10) and (0 = (key % 2))) (type: boolean)
+          filterExpr: ((key < 10) and ((key % 2) = 0)) (type: boolean)
           Filter Operator
-            predicate: ((key < 10) and (0 = (key % 2))) (type: boolean)
+            predicate: ((key < 10) and ((key % 2) = 0)) (type: boolean)
             Select Operator
               expressions: key (type: int)
               outputColumnNames: _col0
@@ -262,10 +262,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.masking_test_view_n_mv
-                  filterExpr: ((key < 10) and (0 = (key % 2))) (type: boolean)
+                  filterExpr: ((key < 10) and ((key % 2) = 0)) (type: boolean)
                   Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((key < 10) and (0 = (key % 2))) (type: boolean)
+                    predicate: ((key < 10) and ((key % 2) = 0)) (type: boolean)
                     Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       keys: key (type: int)
@@ -536,10 +536,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.masking_test_view_n_mv_4
-                  filterExpr: ((key < 10) and (0 = (key % 2))) (type: boolean)
+                  filterExpr: ((key < 10) and ((key % 2) = 0)) (type: boolean)
                   Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((key < 10) and (0 = (key % 2))) (type: boolean)
+                    predicate: ((key < 10) and ((key % 2) = 0)) (type: boolean)
                     Statistics: Num rows: 5 Data size: 475 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: reverse(value) (type: string)
@@ -627,10 +627,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.masking_test_view_n_mv_4
-                  filterExpr: ((key < 10) and (0 = (key % 2))) (type: boolean)
+                  filterExpr: ((key < 10) and ((key % 2) = 0)) (type: boolean)
                   Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((key < 10) and (0 = (key % 2))) (type: boolean)
+                    predicate: ((key < 10) and ((key % 2) = 0)) (type: boolean)
                     Statistics: Num rows: 5 Data size: 475 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: reverse(value) (type: string), key (type: int)
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb.q.out
index 16a98ff..b1a9df9 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb.q.out
@@ -646,10 +646,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv_n0
-                  filterExpr: ((lo_quantity < 25.0D) and lo_discount BETWEEN 1.0D AND 3.0D and (d_year = 1993)) (type: boolean)
+                  filterExpr: ((lo_quantity < 25.0D) and (d_year = 1993) and lo_discount BETWEEN 1.0D AND 3.0D) (type: boolean)
                   Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((lo_quantity < 25.0D) and lo_discount BETWEEN 1.0D AND 3.0D and (d_year = 1993)) (type: boolean)
+                    predicate: ((lo_quantity < 25.0D) and (d_year = 1993) and lo_discount BETWEEN 1.0D AND 3.0D) (type: boolean)
                     Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: discounted_price (type: double)
@@ -736,10 +736,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv_n0
-                  filterExpr: (lo_quantity BETWEEN 26.0D AND 35.0D and lo_discount BETWEEN 4.0D AND 6.0D and (d_yearmonthnum = 199401)) (type: boolean)
+                  filterExpr: ((d_yearmonthnum = 199401) and lo_quantity BETWEEN 26.0D AND 35.0D and lo_discount BETWEEN 4.0D AND 6.0D) (type: boolean)
                   Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (lo_quantity BETWEEN 26.0D AND 35.0D and lo_discount BETWEEN 4.0D AND 6.0D and (d_yearmonthnum = 199401)) (type: boolean)
+                    predicate: ((d_yearmonthnum = 199401) and lo_quantity BETWEEN 26.0D AND 35.0D and lo_discount BETWEEN 4.0D AND 6.0D) (type: boolean)
                     Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: discounted_price (type: double)
@@ -828,10 +828,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv_n0
-                  filterExpr: (lo_quantity BETWEEN 26.0D AND 35.0D and lo_discount BETWEEN 5.0D AND 7.0D and (d_year = 1994) and (d_weeknuminyear = 6)) (type: boolean)
+                  filterExpr: ((d_year = 1994) and (d_weeknuminyear = 6) and lo_quantity BETWEEN 26.0D AND 35.0D and lo_discount BETWEEN 5.0D AND 7.0D) (type: boolean)
                   Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (lo_quantity BETWEEN 26.0D AND 35.0D and lo_discount BETWEEN 5.0D AND 7.0D and (d_year = 1994) and (d_weeknuminyear = 6)) (type: boolean)
+                    predicate: ((d_year = 1994) and (d_weeknuminyear = 6) and lo_quantity BETWEEN 26.0D AND 35.0D and lo_discount BETWEEN 5.0D AND 7.0D) (type: boolean)
                     Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: discounted_price (type: double)
@@ -1059,10 +1059,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv_n0
-                  filterExpr: (p_brand1 BETWEEN 'MFGR#2221' AND 'MFGR#2228' and (s_region = 'ASIA')) (type: boolean)
+                  filterExpr: ((s_region = 'ASIA') and p_brand1 BETWEEN 'MFGR#2221' AND 'MFGR#2228') (type: boolean)
                   Statistics: Num rows: 1 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (p_brand1 BETWEEN 'MFGR#2221' AND 'MFGR#2228' and (s_region = 'ASIA')) (type: boolean)
+                    predicate: ((s_region = 'ASIA') and p_brand1 BETWEEN 'MFGR#2221' AND 'MFGR#2228') (type: boolean)
                     Statistics: Num rows: 1 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: d_year (type: int), p_brand1 (type: string), lo_revenue (type: double)
@@ -1315,10 +1315,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv_n0
-                  filterExpr: (d_year BETWEEN 1992 AND 1997 and (c_region = 'ASIA') and (s_region = 'ASIA')) (type: boolean)
+                  filterExpr: ((c_region = 'ASIA') and (s_region = 'ASIA') and d_year BETWEEN 1992 AND 1997) (type: boolean)
                   Statistics: Num rows: 1 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (d_year BETWEEN 1992 AND 1997 and (c_region = 'ASIA') and (s_region = 'ASIA')) (type: boolean)
+                    predicate: ((c_region = 'ASIA') and (s_region = 'ASIA') and d_year BETWEEN 1992 AND 1997) (type: boolean)
                     Statistics: Num rows: 1 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: c_nation (type: string), d_year (type: int), s_nation (type: string), lo_revenue (type: double)
@@ -1443,10 +1443,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv_n0
-                  filterExpr: (d_year BETWEEN 1992 AND 1997 and (c_nation = 'UNITED STATES') and (s_nation = 'UNITED STATES')) (type: boolean)
+                  filterExpr: ((c_nation = 'UNITED STATES') and (s_nation = 'UNITED STATES') and d_year BETWEEN 1992 AND 1997) (type: boolean)
                   Statistics: Num rows: 1 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (d_year BETWEEN 1992 AND 1997 and (c_nation = 'UNITED STATES') and (s_nation = 'UNITED STATES')) (type: boolean)
+                    predicate: ((c_nation = 'UNITED STATES') and (s_nation = 'UNITED STATES') and d_year BETWEEN 1992 AND 1997) (type: boolean)
                     Statistics: Num rows: 1 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: c_city (type: string), d_year (type: int), s_city (type: string), lo_revenue (type: double)
@@ -1695,10 +1695,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv_n0
-                  filterExpr: ((c_city) IN ('UNITED KI1', 'UNITED KI5') and (s_city) IN ('UNITED KI1', 'UNITED KI5') and (d_yearmonth = 'Dec1997')) (type: boolean)
+                  filterExpr: ((d_yearmonth = 'Dec1997') and (c_city) IN ('UNITED KI1', 'UNITED KI5') and (s_city) IN ('UNITED KI1', 'UNITED KI5')) (type: boolean)
                   Statistics: Num rows: 1 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((c_city) IN ('UNITED KI1', 'UNITED KI5') and (s_city) IN ('UNITED KI1', 'UNITED KI5') and (d_yearmonth = 'Dec1997')) (type: boolean)
+                    predicate: ((d_yearmonth = 'Dec1997') and (c_city) IN ('UNITED KI1', 'UNITED KI5') and (s_city) IN ('UNITED KI1', 'UNITED KI5')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: c_city (type: string), d_year (type: int), 
s_city (type: string), lo_revenue (type: double)
@@ -1829,10 +1829,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv_n0
-                  filterExpr: ((p_mfgr) IN ('MFGR#1', 'MFGR#2') and (c_region 
= 'AMERICA') and (s_region = 'AMERICA')) (type: boolean)
+                  filterExpr: ((c_region = 'AMERICA') and (s_region = 
'AMERICA') and (p_mfgr) IN ('MFGR#1', 'MFGR#2')) (type: boolean)
                   Statistics: Num rows: 1 Data size: 348 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((p_mfgr) IN ('MFGR#1', 'MFGR#2') and (c_region 
= 'AMERICA') and (s_region = 'AMERICA')) (type: boolean)
+                    predicate: ((c_region = 'AMERICA') and (s_region = 
'AMERICA') and (p_mfgr) IN ('MFGR#1', 'MFGR#2')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 348 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: c_nation (type: string), d_year (type: 
int), net_revenue (type: double)
@@ -1961,10 +1961,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv_n0
-                  filterExpr: ((d_year) IN (1997, 1998) and (p_mfgr) IN 
('MFGR#1', 'MFGR#2') and (c_region = 'AMERICA') and (s_region = 'AMERICA')) 
(type: boolean)
+                  filterExpr: ((c_region = 'AMERICA') and (s_region = 
'AMERICA') and (d_year) IN (1997, 1998) and (p_mfgr) IN ('MFGR#1', 'MFGR#2')) 
(type: boolean)
                   Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((d_year) IN (1997, 1998) and (p_mfgr) IN 
('MFGR#1', 'MFGR#2') and (c_region = 'AMERICA') and (s_region = 'AMERICA')) 
(type: boolean)
+                    predicate: ((c_region = 'AMERICA') and (s_region = 
'AMERICA') and (d_year) IN (1997, 1998) and (p_mfgr) IN ('MFGR#1', 'MFGR#2')) 
(type: boolean)
                     Statistics: Num rows: 1 Data size: 432 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: d_year (type: int), p_category (type: 
string), s_nation (type: string), net_revenue (type: double)
@@ -2093,10 +2093,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv_n0
-                  filterExpr: ((d_year) IN (1997, 1998) and (c_region = 
'AMERICA') and (p_category = 'MFGR#14') and (s_nation = 'UNITED STATES')) 
(type: boolean)
+                  filterExpr: ((c_region = 'AMERICA') and (p_category = 
'MFGR#14') and (s_nation = 'UNITED STATES') and (d_year) IN (1997, 1998)) 
(type: boolean)
                   Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((d_year) IN (1997, 1998) and (c_region = 
'AMERICA') and (p_category = 'MFGR#14') and (s_nation = 'UNITED STATES')) 
(type: boolean)
+                    predicate: ((c_region = 'AMERICA') and (p_category = 
'MFGR#14') and (s_nation = 'UNITED STATES') and (d_year) IN (1997, 1998)) 
(type: boolean)
                     Statistics: Num rows: 1 Data size: 432 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: d_year (type: int), p_brand1 (type: 
string), s_city (type: string), net_revenue (type: double)
diff --git 
a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_2.q.out 
b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_2.q.out
index fc84498..2159db0 100644
--- 
a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_2.q.out
+++ 
b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_2.q.out
@@ -648,10 +648,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv
-                  filterExpr: ((UDFToDouble(lo_quantity) < 25.0D) and 
UDFToDouble(lo_discount) BETWEEN 1.0D AND 3.0D and (UDFToInteger(d_year) = 
1993)) (type: boolean)
+                  filterExpr: ((UDFToDouble(lo_quantity) < 25.0D) and 
(UDFToInteger(d_year) = 1993) and UDFToDouble(lo_discount) BETWEEN 1.0D AND 
3.0D) (type: boolean)
                   Statistics: Num rows: 1 Data size: 260 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((UDFToDouble(lo_quantity) < 25.0D) and 
UDFToDouble(lo_discount) BETWEEN 1.0D AND 3.0D and (UDFToInteger(d_year) = 
1993)) (type: boolean)
+                    predicate: ((UDFToDouble(lo_quantity) < 25.0D) and 
(UDFToInteger(d_year) = 1993) and UDFToDouble(lo_discount) BETWEEN 1.0D AND 
3.0D) (type: boolean)
                     Statistics: Num rows: 1 Data size: 260 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: discounted_price (type: double)
@@ -738,10 +738,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv
-                  filterExpr: (UDFToDouble(lo_quantity) BETWEEN 26.0D AND 
35.0D and UDFToDouble(lo_discount) BETWEEN 4.0D AND 6.0D and 
(UDFToInteger(d_yearmonthnum) = 199401)) (type: boolean)
+                  filterExpr: ((UDFToInteger(d_yearmonthnum) = 199401) and 
UDFToDouble(lo_quantity) BETWEEN 26.0D AND 35.0D and UDFToDouble(lo_discount) 
BETWEEN 4.0D AND 6.0D) (type: boolean)
                   Statistics: Num rows: 1 Data size: 260 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: (UDFToDouble(lo_quantity) BETWEEN 26.0D AND 
35.0D and UDFToDouble(lo_discount) BETWEEN 4.0D AND 6.0D and 
(UDFToInteger(d_yearmonthnum) = 199401)) (type: boolean)
+                    predicate: ((UDFToInteger(d_yearmonthnum) = 199401) and 
UDFToDouble(lo_quantity) BETWEEN 26.0D AND 35.0D and UDFToDouble(lo_discount) 
BETWEEN 4.0D AND 6.0D) (type: boolean)
                     Statistics: Num rows: 1 Data size: 260 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: discounted_price (type: double)
@@ -830,10 +830,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv
-                  filterExpr: (UDFToDouble(lo_quantity) BETWEEN 26.0D AND 
35.0D and UDFToDouble(lo_discount) BETWEEN 5.0D AND 7.0D and 
(UDFToInteger(d_year) = 1994) and (UDFToInteger(d_weeknuminyear) = 6)) (type: 
boolean)
+                  filterExpr: ((UDFToInteger(d_year) = 1994) and 
(UDFToInteger(d_weeknuminyear) = 6) and UDFToDouble(lo_quantity) BETWEEN 26.0D 
AND 35.0D and UDFToDouble(lo_discount) BETWEEN 5.0D AND 7.0D) (type: boolean)
                   Statistics: Num rows: 1 Data size: 344 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: (UDFToDouble(lo_quantity) BETWEEN 26.0D AND 
35.0D and UDFToDouble(lo_discount) BETWEEN 5.0D AND 7.0D and 
(UDFToInteger(d_year) = 1994) and (UDFToInteger(d_weeknuminyear) = 6)) (type: 
boolean)
+                    predicate: ((UDFToInteger(d_year) = 1994) and 
(UDFToInteger(d_weeknuminyear) = 6) and UDFToDouble(lo_quantity) BETWEEN 26.0D 
AND 35.0D and UDFToDouble(lo_discount) BETWEEN 5.0D AND 7.0D) (type: boolean)
                     Statistics: Num rows: 1 Data size: 344 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: discounted_price (type: double)
@@ -1061,10 +1061,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv
-                  filterExpr: (p_brand1 BETWEEN 'MFGR#2221' AND 'MFGR#2228' 
and (s_region = 'ASIA')) (type: boolean)
+                  filterExpr: ((s_region = 'ASIA') and p_brand1 BETWEEN 
'MFGR#2221' AND 'MFGR#2228') (type: boolean)
                   Statistics: Num rows: 1 Data size: 260 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: (p_brand1 BETWEEN 'MFGR#2221' AND 'MFGR#2228' 
and (s_region = 'ASIA')) (type: boolean)
+                    predicate: ((s_region = 'ASIA') and p_brand1 BETWEEN 
'MFGR#2221' AND 'MFGR#2228') (type: boolean)
                     Statistics: Num rows: 1 Data size: 260 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: UDFToInteger(d_year) (type: int), p_brand1 
(type: string), lo_revenue (type: double)
@@ -1317,10 +1317,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv
-                  filterExpr: (UDFToInteger(d_year) BETWEEN 1992 AND 1997 and 
(c_region = 'ASIA') and (s_region = 'ASIA')) (type: boolean)
+                  filterExpr: ((c_region = 'ASIA') and (s_region = 'ASIA') and 
UDFToInteger(d_year) BETWEEN 1992 AND 1997) (type: boolean)
                   Statistics: Num rows: 1 Data size: 428 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: (UDFToInteger(d_year) BETWEEN 1992 AND 1997 and 
(c_region = 'ASIA') and (s_region = 'ASIA')) (type: boolean)
+                    predicate: ((c_region = 'ASIA') and (s_region = 'ASIA') 
and UDFToInteger(d_year) BETWEEN 1992 AND 1997) (type: boolean)
                     Statistics: Num rows: 1 Data size: 428 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: c_nation (type: string), s_nation (type: 
string), UDFToInteger(d_year) (type: int), lo_revenue (type: double)
@@ -1445,10 +1445,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv
-                  filterExpr: (UDFToInteger(d_year) BETWEEN 1992 AND 1997 and 
(c_nation = 'UNITED STATES') and (s_nation = 'UNITED STATES')) (type: boolean)
+                  filterExpr: ((c_nation = 'UNITED STATES') and (s_nation = 
'UNITED STATES') and UDFToInteger(d_year) BETWEEN 1992 AND 1997) (type: boolean)
                   Statistics: Num rows: 1 Data size: 428 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: (UDFToInteger(d_year) BETWEEN 1992 AND 1997 and 
(c_nation = 'UNITED STATES') and (s_nation = 'UNITED STATES')) (type: boolean)
+                    predicate: ((c_nation = 'UNITED STATES') and (s_nation = 
'UNITED STATES') and UDFToInteger(d_year) BETWEEN 1992 AND 1997) (type: boolean)
                     Statistics: Num rows: 1 Data size: 428 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: c_city (type: string), s_city (type: 
string), UDFToInteger(d_year) (type: int), lo_revenue (type: double)
@@ -1701,10 +1701,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv
-                  filterExpr: ((c_city) IN ('UNITED KI1', 'UNITED KI5') and 
(s_city) IN ('UNITED KI1', 'UNITED KI5') and (d_yearmonth = 'Dec1997')) (type: 
boolean)
+                  filterExpr: ((d_yearmonth = 'Dec1997') and (c_city) IN 
('UNITED KI1', 'UNITED KI5') and (s_city) IN ('UNITED KI1', 'UNITED KI5')) 
(type: boolean)
                   Statistics: Num rows: 1 Data size: 344 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((c_city) IN ('UNITED KI1', 'UNITED KI5') and 
(s_city) IN ('UNITED KI1', 'UNITED KI5') and (d_yearmonth = 'Dec1997')) (type: 
boolean)
+                    predicate: ((d_yearmonth = 'Dec1997') and (c_city) IN 
('UNITED KI1', 'UNITED KI5') and (s_city) IN ('UNITED KI1', 'UNITED KI5')) 
(type: boolean)
                     Statistics: Num rows: 1 Data size: 344 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: c_city (type: string), s_city (type: 
string), UDFToInteger(d_year) (type: int), lo_revenue (type: double)
@@ -1835,10 +1835,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv
-                  filterExpr: ((p_mfgr) IN ('MFGR#1', 'MFGR#2') and (c_region 
= 'AMERICA') and (s_region = 'AMERICA')) (type: boolean)
+                  filterExpr: ((c_region = 'AMERICA') and (s_region = 
'AMERICA') and (p_mfgr) IN ('MFGR#1', 'MFGR#2')) (type: boolean)
                   Statistics: Num rows: 1 Data size: 428 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((p_mfgr) IN ('MFGR#1', 'MFGR#2') and (c_region 
= 'AMERICA') and (s_region = 'AMERICA')) (type: boolean)
+                    predicate: ((c_region = 'AMERICA') and (s_region = 
'AMERICA') and (p_mfgr) IN ('MFGR#1', 'MFGR#2')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 428 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: UDFToInteger(d_year) (type: int), c_nation 
(type: string), net_revenue (type: double)
@@ -1967,10 +1967,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv
-                  filterExpr: ((UDFToInteger(d_year)) IN (1997, 1998) and 
(p_mfgr) IN ('MFGR#1', 'MFGR#2') and (c_region = 'AMERICA') and (s_region = 
'AMERICA')) (type: boolean)
+                  filterExpr: ((c_region = 'AMERICA') and (s_region = 
'AMERICA') and (UDFToInteger(d_year)) IN (1997, 1998) and (p_mfgr) IN 
('MFGR#1', 'MFGR#2')) (type: boolean)
                   Statistics: Num rows: 1 Data size: 512 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((UDFToInteger(d_year)) IN (1997, 1998) and 
(p_mfgr) IN ('MFGR#1', 'MFGR#2') and (c_region = 'AMERICA') and (s_region = 
'AMERICA')) (type: boolean)
+                    predicate: ((c_region = 'AMERICA') and (s_region = 
'AMERICA') and (UDFToInteger(d_year)) IN (1997, 1998) and (p_mfgr) IN 
('MFGR#1', 'MFGR#2')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 512 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: UDFToInteger(d_year) (type: int), s_nation 
(type: string), p_category (type: string), net_revenue (type: double)
@@ -2099,10 +2099,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.ssb_mv
-                  filterExpr: ((UDFToInteger(d_year)) IN (1997, 1998) and 
(c_region = 'AMERICA') and (p_category = 'MFGR#14') and (s_nation = 'UNITED 
STATES')) (type: boolean)
+                  filterExpr: ((c_region = 'AMERICA') and (p_category = 
'MFGR#14') and (s_nation = 'UNITED STATES') and (UDFToInteger(d_year)) IN 
(1997, 1998)) (type: boolean)
                   Statistics: Num rows: 1 Data size: 512 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((UDFToInteger(d_year)) IN (1997, 1998) and 
(c_region = 'AMERICA') and (p_category = 'MFGR#14') and (s_nation = 'UNITED 
STATES')) (type: boolean)
+                    predicate: ((c_region = 'AMERICA') and (p_category = 
'MFGR#14') and (s_nation = 'UNITED STATES') and (UDFToInteger(d_year)) IN 
(1997, 1998)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 512 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: UDFToInteger(d_year) (type: int), s_city 
(type: string), p_brand1 (type: string), net_revenue (type: double)
diff --git 
a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_grouping_sets.q.out
 
b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_grouping_sets.q.out
index 35e76eb..c4207d5 100644
--- 
a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_grouping_sets.q.out
+++ 
b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_grouping_sets.q.out
@@ -678,7 +678,7 @@ POSTHOOK: Input: default@ssb_mv_n3
 #### A masked pattern was here ####
 CBO PLAN:
 HiveAggregate(group=[{}], agg#0=[sum($16)])
-  HiveFilter(condition=[AND(>(2.5E1, $8), =(CAST($18):BIGINT NOT NULL, 29247), 
BETWEEN(false, $7, 1:DOUBLE, 3:DOUBLE), =(1993, $4))])
+  HiveFilter(condition=[AND(>(2.5E1, $8), =(CAST($18):BIGINT NOT NULL, 29247), 
=($4, 1993), BETWEEN(false, $7, 1:DOUBLE, 3:DOUBLE))])
     HiveTableScan(table=[[default, ssb_mv_n3]], 
table:alias=[default.ssb_mv_n3])
 
 PREHOOK: query: explain cbo
@@ -713,7 +713,7 @@ POSTHOOK: Input: default@ssb_mv_n3
 #### A masked pattern was here ####
 CBO PLAN:
 HiveAggregate(group=[{}], agg#0=[sum($16)])
-  HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 29247), 
BETWEEN(false, $8, 26:DOUBLE, 35:DOUBLE), BETWEEN(false, $7, 4:DOUBLE, 
6:DOUBLE), =(199401, $6))])
+  HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 29247), =($6, 
199401), BETWEEN(false, $8, 26:DOUBLE, 35:DOUBLE), BETWEEN(false, $7, 4:DOUBLE, 
6:DOUBLE))])
     HiveTableScan(table=[[default, ssb_mv_n3]], 
table:alias=[default.ssb_mv_n3])
 
 PREHOOK: query: explain cbo
@@ -750,7 +750,7 @@ POSTHOOK: Input: default@ssb_mv_n3
 #### A masked pattern was here ####
 CBO PLAN:
 HiveAggregate(group=[{}], agg#0=[sum($16)])
-  HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 29247), 
BETWEEN(false, $8, 26:DOUBLE, 35:DOUBLE), BETWEEN(false, $7, 5:DOUBLE, 
7:DOUBLE), =(1994, $4), =(6, $3))])
+  HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 29247), =($4, 1994), 
=($3, 6), BETWEEN(false, $8, 26:DOUBLE, 35:DOUBLE), BETWEEN(false, $7, 
5:DOUBLE, 7:DOUBLE))])
     HiveTableScan(table=[[default, ssb_mv_n3]], 
table:alias=[default.ssb_mv_n3])
 
 PREHOOK: query: explain cbo
@@ -852,7 +852,7 @@ CBO PLAN:
 HiveSortLimit(sort0=[$1], sort1=[$2], dir0=[ASC], dir1=[ASC])
   HiveProject($f2=[$2], d_year=[$0], p_brand1=[$1])
     HiveAggregate(group=[{4, 9}], agg#0=[sum($15)])
-      HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 2496), 
BETWEEN(false, $9, _UTF-16LE'MFGR#2221':VARCHAR(2147483647) CHARACTER SET 
"UTF-16LE", _UTF-16LE'MFGR#2228':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), 
=($14, _UTF-16LE'ASIA'))])
+      HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 2496), =($14, 
_UTF-16LE'ASIA'), BETWEEN(false, $9, _UTF-16LE'MFGR#2221':VARCHAR(2147483647) 
CHARACTER SET "UTF-16LE", _UTF-16LE'MFGR#2228':VARCHAR(2147483647) CHARACTER 
SET "UTF-16LE"))])
         HiveTableScan(table=[[default, ssb_mv_n3]], 
table:alias=[default.ssb_mv_n3])
 
 PREHOOK: query: explain cbo
@@ -959,7 +959,7 @@ CBO PLAN:
 HiveSortLimit(sort0=[$2], sort1=[$3], dir0=[ASC], dir1=[DESC])
   HiveProject(c_nation=[$0], s_nation=[$2], d_year=[$1], $f3=[$3])
     HiveAggregate(group=[{1, 4, 13}], agg#0=[sum($15)])
-      HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 2496), 
BETWEEN(false, $4, 1992, 1997), =($2, _UTF-16LE'ASIA'), =($14, 
_UTF-16LE'ASIA'))])
+      HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 2496), =($2, 
_UTF-16LE'ASIA'), =($14, _UTF-16LE'ASIA'), BETWEEN(false, $4, 1992, 1997))])
         HiveTableScan(table=[[default, ssb_mv_n3]], 
table:alias=[default.ssb_mv_n3])
 
 PREHOOK: query: explain cbo
@@ -1012,7 +1012,7 @@ CBO PLAN:
 HiveSortLimit(sort0=[$2], sort1=[$3], dir0=[ASC], dir1=[DESC])
   HiveProject(c_city=[$0], s_city=[$2], d_year=[$1], $f3=[$3])
     HiveAggregate(group=[{0, 4, 12}], agg#0=[sum($15)])
-      HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 2496), 
BETWEEN(false, $4, 1992, 1997), =($1, _UTF-16LE'UNITED STATES'), =($13, 
_UTF-16LE'UNITED STATES'))])
+      HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 2496), =($1, 
_UTF-16LE'UNITED STATES'), =($13, _UTF-16LE'UNITED STATES'), BETWEEN(false, $4, 
1992, 1997))])
         HiveTableScan(table=[[default, ssb_mv_n3]], 
table:alias=[default.ssb_mv_n3])
 
 PREHOOK: query: explain cbo
@@ -1065,7 +1065,7 @@ CBO PLAN:
 HiveSortLimit(sort0=[$2], sort1=[$3], dir0=[ASC], dir1=[DESC])
   HiveProject(c_city=[$0], s_city=[$2], d_year=[$1], $f3=[$3])
     HiveAggregate(group=[{0, 4, 12}], agg#0=[sum($15)])
-      HiveFilter(condition=[AND(IN($0, _UTF-16LE'UNITED 
KI1':VARCHAR(2147483647) CHARACTER SET "UTF-16LE", _UTF-16LE'UNITED 
KI5':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), IN($12, _UTF-16LE'UNITED 
KI1':VARCHAR(2147483647) CHARACTER SET "UTF-16LE", _UTF-16LE'UNITED 
KI5':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), =(CAST($18):BIGINT NOT 
NULL, 2496), BETWEEN(false, $4, 1992, 1997))])
+      HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 2496), IN($0, 
_UTF-16LE'UNITED KI1':VARCHAR(2147483647) CHARACTER SET "UTF-16LE", 
_UTF-16LE'UNITED KI5':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), IN($12, 
_UTF-16LE'UNITED KI1':VARCHAR(2147483647) CHARACTER SET "UTF-16LE", 
_UTF-16LE'UNITED KI5':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), 
BETWEEN(false, $4, 1992, 1997))])
         HiveTableScan(table=[[default, ssb_mv_n3]], 
table:alias=[default.ssb_mv_n3])
 
 PREHOOK: query: explain cbo
@@ -1118,7 +1118,7 @@ CBO PLAN:
 HiveSortLimit(sort0=[$2], sort1=[$3], dir0=[ASC], dir1=[DESC])
   HiveProject(c_city=[$0], s_city=[$2], d_year=[$1], $f3=[$3])
     HiveAggregate(group=[{0, 4, 12}], agg#0=[sum($15)])
-      HiveFilter(condition=[AND(IN($0, _UTF-16LE'UNITED 
KI1':VARCHAR(2147483647) CHARACTER SET "UTF-16LE", _UTF-16LE'UNITED 
KI5':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), IN($12, _UTF-16LE'UNITED 
KI1':VARCHAR(2147483647) CHARACTER SET "UTF-16LE", _UTF-16LE'UNITED 
KI5':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), =(CAST($18):BIGINT NOT 
NULL, 2496), =($5, _UTF-16LE'Dec1997'))])
+      HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 2496), IN($0, 
_UTF-16LE'UNITED KI1':VARCHAR(2147483647) CHARACTER SET "UTF-16LE", 
_UTF-16LE'UNITED KI5':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), IN($12, 
_UTF-16LE'UNITED KI1':VARCHAR(2147483647) CHARACTER SET "UTF-16LE", 
_UTF-16LE'UNITED KI5':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), =($5, 
_UTF-16LE'Dec1997'))])
         HiveTableScan(table=[[default, ssb_mv_n3]], 
table:alias=[default.ssb_mv_n3])
 
 PREHOOK: query: explain cbo
@@ -1177,7 +1177,7 @@ CBO PLAN:
 HiveSortLimit(sort0=[$0], sort1=[$1], dir0=[ASC], dir1=[ASC])
   HiveProject(d_year=[$1], c_nation=[$0], $f2=[$2])
     HiveAggregate(group=[{1, 4}], agg#0=[sum($17)])
-      HiveFilter(condition=[AND(IN($11, _UTF-16LE'MFGR#1':VARCHAR(2147483647) 
CHARACTER SET "UTF-16LE", _UTF-16LE'MFGR#2':VARCHAR(2147483647) CHARACTER SET 
"UTF-16LE"), =(CAST($18):BIGINT NOT NULL, 2496), =($2, _UTF-16LE'AMERICA'), 
=($14, _UTF-16LE'AMERICA'))])
+      HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 2496), IN($11, 
_UTF-16LE'MFGR#1':VARCHAR(2147483647) CHARACTER SET "UTF-16LE", 
_UTF-16LE'MFGR#2':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), =($2, 
_UTF-16LE'AMERICA'), =($14, _UTF-16LE'AMERICA'))])
         HiveTableScan(table=[[default, ssb_mv_n3]], 
table:alias=[default.ssb_mv_n3])
 
 PREHOOK: query: explain cbo
@@ -1238,7 +1238,7 @@ CBO PLAN:
 HiveSortLimit(sort0=[$0], sort1=[$1], sort2=[$2], dir0=[ASC], dir1=[ASC], 
dir2=[ASC])
   HiveProject(d_year=[$0], s_nation=[$2], p_category=[$1], $f3=[$3])
     HiveAggregate(group=[{4, 10, 13}], agg#0=[sum($17)])
-      HiveFilter(condition=[AND(IN($4, 1997, 1998), IN($11, 
_UTF-16LE'MFGR#1':VARCHAR(2147483647) CHARACTER SET "UTF-16LE", 
_UTF-16LE'MFGR#2':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), 
=(CAST($18):BIGINT NOT NULL, 2496), =($2, _UTF-16LE'AMERICA'), =($14, 
_UTF-16LE'AMERICA'))])
+      HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 2496), IN($4, 
1997, 1998), IN($11, _UTF-16LE'MFGR#1':VARCHAR(2147483647) CHARACTER SET 
"UTF-16LE", _UTF-16LE'MFGR#2':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), 
=($2, _UTF-16LE'AMERICA'), =($14, _UTF-16LE'AMERICA'))])
         HiveTableScan(table=[[default, ssb_mv_n3]], 
table:alias=[default.ssb_mv_n3])
 
 PREHOOK: query: explain cbo
@@ -1299,7 +1299,7 @@ CBO PLAN:
 HiveSortLimit(sort0=[$0], sort1=[$1], sort2=[$2], dir0=[ASC], dir1=[ASC], 
dir2=[ASC])
   HiveProject(d_year=[$0], s_city=[$2], p_brand1=[$1], $f3=[$3])
     HiveAggregate(group=[{4, 9, 12}], agg#0=[sum($17)])
-      HiveFilter(condition=[AND(IN($4, 1997, 1998), =(CAST($18):BIGINT NOT 
NULL, 2496), =($2, _UTF-16LE'AMERICA'), =($10, _UTF-16LE'MFGR#14'), =($13, 
_UTF-16LE'UNITED STATES'))])
+      HiveFilter(condition=[AND(=(CAST($18):BIGINT NOT NULL, 2496), IN($4, 
1997, 1998), =($2, _UTF-16LE'AMERICA'), =($10, _UTF-16LE'MFGR#14'), =($13, 
_UTF-16LE'UNITED STATES'))])
         HiveTableScan(table=[[default, ssb_mv_n3]], 
table:alias=[default.ssb_mv_n3])
 
 PREHOOK: query: DROP MATERIALIZED VIEW `ssb_mv_n3`
diff --git 
a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_grouping_sets_2.q.out
 
b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_grouping_sets_2.q.out
index c1a9892..1521c8c 100644
--- 
a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_grouping_sets_2.q.out
+++ 
b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_grouping_sets_2.q.out
@@ -707,7 +707,7 @@ CBO PLAN:
 HiveSortLimit(sort0=[$1], sort1=[$2], dir0=[ASC], dir1=[ASC])
   HiveProject($f2=[$2], d_year=[$1], p_brand1=[$0])
     HiveAggregate(group=[{0, 7}], agg#0=[sum($9)])
-      HiveFilter(condition=[AND(=(CAST($8):BIGINT NOT NULL, 0), BETWEEN(false, 
$0, _UTF-16LE'MFGR#2221':VARCHAR(2147483647) CHARACTER SET "UTF-16LE", 
_UTF-16LE'MFGR#2228':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), =($4, 
_UTF-16LE'ASIA'))])
+      HiveFilter(condition=[AND(=(CAST($8):BIGINT NOT NULL, 0), =($4, 
_UTF-16LE'ASIA'), BETWEEN(false, $0, _UTF-16LE'MFGR#2221':VARCHAR(2147483647) 
CHARACTER SET "UTF-16LE", _UTF-16LE'MFGR#2228':VARCHAR(2147483647) CHARACTER 
SET "UTF-16LE"))])
         HiveTableScan(table=[[default, ssb_mv_n4]], 
table:alias=[default.ssb_mv_n4])
 
 PREHOOK: query: explain cbo
diff --git a/ql/src/test/results/clientpositive/llap/multi_in_clause.q.out 
b/ql/src/test/results/clientpositive/llap/multi_in_clause.q.out
index 985c2a4..be366c2 100644
--- a/ql/src/test/results/clientpositive/llap/multi_in_clause.q.out
+++ b/ql/src/test/results/clientpositive/llap/multi_in_clause.q.out
@@ -1,20 +1,31 @@
-PREHOOK: query: create table very_simple_table_for_in_test (name STRING)
+PREHOOK: query: create table very_simple_table_for_in_test (name STRING, 
othername STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@very_simple_table_for_in_test
-POSTHOOK: query: create table very_simple_table_for_in_test (name STRING)
+POSTHOOK: query: create table very_simple_table_for_in_test (name STRING, 
othername STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@very_simple_table_for_in_test
-PREHOOK: query: insert into very_simple_table_for_in_test values ('a')
+PREHOOK: query: insert into very_simple_table_for_in_test values ('a', null)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@very_simple_table_for_in_test
-POSTHOOK: query: insert into very_simple_table_for_in_test values ('a')
+POSTHOOK: query: insert into very_simple_table_for_in_test values ('a', null)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@very_simple_table_for_in_test
 POSTHOOK: Lineage: very_simple_table_for_in_test.name SCRIPT []
+POSTHOOK: Lineage: very_simple_table_for_in_test.othername EXPRESSION []
+PREHOOK: query: insert into very_simple_table_for_in_test values (null, null)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@very_simple_table_for_in_test
+POSTHOOK: query: insert into very_simple_table_for_in_test values (null, null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@very_simple_table_for_in_test
+POSTHOOK: Lineage: very_simple_table_for_in_test.name EXPRESSION []
+POSTHOOK: Lineage: very_simple_table_for_in_test.othername EXPRESSION []
 PREHOOK: query: explain cbo
 select * from very_simple_table_for_in_test where name IN('g','r') AND name 
IN('a','b')
 PREHOOK: type: QUERY
@@ -27,7 +38,7 @@ POSTHOOK: Input: default@very_simple_table_for_in_test
 #### A masked pattern was here ####
 CBO PLAN:
 HiveSortLimit(fetch=[0])
-  HiveProject(name=[$0])
+  HiveProject(name=[$0], othername=[$1])
     HiveTableScan(table=[[default, very_simple_table_for_in_test]], 
table:alias=[very_simple_table_for_in_test])
 
 PREHOOK: query: select * from very_simple_table_for_in_test where name 
IN('g','r') AND name IN('a','b')
@@ -38,3 +49,51 @@ POSTHOOK: query: select * from very_simple_table_for_in_test 
where name IN('g','
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@very_simple_table_for_in_test
 #### A masked pattern was here ####
+PREHOOK: query: explain cbo
+select name IN('g','r') AND name IN('a','b') from very_simple_table_for_in_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@very_simple_table_for_in_test
+#### A masked pattern was here ####
+POSTHOOK: query: explain cbo
+select name IN('g','r') AND name IN('a','b') from very_simple_table_for_in_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@very_simple_table_for_in_test
+#### A masked pattern was here ####
+CBO PLAN:
+HiveProject(_o__c0=[AND(IS NULL($0), null)])
+  HiveTableScan(table=[[default, very_simple_table_for_in_test]], 
table:alias=[very_simple_table_for_in_test])
+
+PREHOOK: query: select name IN('g','r') AND name IN('a','b') from 
very_simple_table_for_in_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@very_simple_table_for_in_test
+#### A masked pattern was here ####
+POSTHOOK: query: select name IN('g','r') AND name IN('a','b') from 
very_simple_table_for_in_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@very_simple_table_for_in_test
+#### A masked pattern was here ####
+false
+NULL
+PREHOOK: query: explain cbo
+select name IN('g','r') AND name IN('a','b') AND othername IN('x', 'y') from 
very_simple_table_for_in_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@very_simple_table_for_in_test
+#### A masked pattern was here ####
+POSTHOOK: query: explain cbo
+select name IN('g','r') AND name IN('a','b') AND othername IN('x', 'y') from 
very_simple_table_for_in_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@very_simple_table_for_in_test
+#### A masked pattern was here ####
+CBO PLAN:
+HiveProject(_o__c0=[AND(IN($1, _UTF-16LE'x':VARCHAR(2147483647) CHARACTER SET 
"UTF-16LE", _UTF-16LE'y':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"), IS 
NULL($0), null)])
+  HiveTableScan(table=[[default, very_simple_table_for_in_test]], 
table:alias=[very_simple_table_for_in_test])
+
+PREHOOK: query: select name IN('g','r') AND name IN('a','b') AND othername 
IN('x', 'y') from very_simple_table_for_in_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@very_simple_table_for_in_test
+#### A masked pattern was here ####
+POSTHOOK: query: select name IN('g','r') AND name IN('a','b') AND othername 
IN('x', 'y') from very_simple_table_for_in_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@very_simple_table_for_in_test
+#### A masked pattern was here ####
+false
+NULL
diff --git a/ql/src/test/results/clientpositive/llap/pcr.q.out 
b/ql/src/test/results/clientpositive/llap/pcr.q.out
index 0fdcfbe..78b3a58 100644
--- a/ql/src/test/results/clientpositive/llap/pcr.q.out
+++ b/ql/src/test/results/clientpositive/llap/pcr.q.out
@@ -4292,7 +4292,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 OPTIMIZED SQL: SELECT `key`, `value`, CAST('2008-04-08' AS STRING) AS `ds`, 
`hr`
 FROM (SELECT `key`, `value`, `hr`
 FROM `default`.`srcpart`
-WHERE `key` = 11 AND `hr` IN ('11', '12') AND `ds` = '2008-04-08'
+WHERE `key` = 11 AND `ds` = '2008-04-08' AND `hr` IN ('11', '12')
 ORDER BY `key`, `hr`) AS `t1`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -4310,7 +4310,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: ((UDFToDouble(key) = 11.0D) and (hr) IN ('11', 
'12') and (ds = '2008-04-08')) (type: boolean)
+                  filterExpr: ((UDFToDouble(key) = 11.0D) and (ds = 
'2008-04-08') and (hr) IN ('11', '12')) (type: boolean)
                   Statistics: Num rows: 1000 Data size: 362000 Basic stats: 
COMPLETE Column stats: COMPLETE
                   GatherStats: false
                   Filter Operator
diff --git a/ql/src/test/results/clientpositive/llap/ppd_union_view.q.out 
b/ql/src/test/results/clientpositive/llap/ppd_union_view.q.out
index 029b7f0..b62bc64 100644
--- a/ql/src/test/results/clientpositive/llap/ppd_union_view.q.out
+++ b/ql/src/test/results/clientpositive/llap/ppd_union_view.q.out
@@ -161,7 +161,7 @@ FROM `default`.`t1_old`
 WHERE `ds` = '2011-10-13' AND `keymap` IS NOT NULL) AS `t2`
 INNER JOIN (SELECT `key`, `keymap`
 FROM `default`.`t1_mapping`
-WHERE '2011-10-13' = `ds` AND `keymap` IS NOT NULL) AS `t4` ON `t2`.`keymap` = 
`t4`.`keymap`) AS `t6`
+WHERE `ds` = '2011-10-13' AND `keymap` IS NOT NULL) AS `t4` ON `t2`.`keymap` = 
`t4`.`keymap`) AS `t6`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -487,7 +487,7 @@ FROM `default`.`t1_old`
 WHERE `ds` = '2011-10-15' AND `keymap` IS NOT NULL) AS `t2`
 INNER JOIN (SELECT `key`, `keymap`
 FROM `default`.`t1_mapping`
-WHERE '2011-10-15' = `ds` AND `keymap` IS NOT NULL) AS `t4` ON `t2`.`keymap` = 
`t4`.`keymap`) AS `t6`
+WHERE `ds` = '2011-10-15' AND `keymap` IS NOT NULL) AS `t4` ON `t2`.`keymap` = 
`t4`.`keymap`) AS `t6`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -515,14 +515,14 @@ STAGE PLANS:
                     Select Operator
                       expressions: _col0 (type: string), _col1 (type: string), 
'2011-10-15' (type: string)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 2 Data size: 736 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 2 Data size: 544 Basic stats: 
COMPLETE Column stats: COMPLETE
                       File Output Operator
                         bucketingVersion: 2
                         compressed: false
                         GlobalTableId: 0
 #### A masked pattern was here ####
                         NumFilesPerFileSink: 1
-                        Statistics: Num rows: 2 Data size: 736 Basic stats: 
COMPLETE Column stats: PARTIAL
+                        Statistics: Num rows: 2 Data size: 544 Basic stats: 
COMPLETE Column stats: COMPLETE
 #### A masked pattern was here ####
                         table:
                             input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -622,16 +622,16 @@ STAGE PLANS:
                   filterExpr: ((ds = '2011-10-15') and keymap is not null) 
(type: boolean)
                   properties:
                     insideView TRUE
-                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE 
Column stats: PARTIAL
+                  Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE 
Column stats: COMPLETE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
                     predicate: ((ds = '2011-10-15') and keymap is not null) 
(type: boolean)
-                    Statistics: Num rows: 1 Data size: 552 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 1 Data size: 352 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string), keymap (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 368 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 1 Data size: 168 Basic stats: 
COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         bucketingVersion: 2
                         key expressions: _col1 (type: string)
@@ -639,7 +639,7 @@ STAGE PLANS:
                         numBuckets: -1
                         sort order: +
                         Map-reduce partition columns: _col1 (type: string)
-                        Statistics: Num rows: 1 Data size: 368 Basic stats: 
COMPLETE Column stats: PARTIAL
+                        Statistics: Num rows: 1 Data size: 168 Basic stats: 
COMPLETE Column stats: COMPLETE
                         tag: 1
                         value expressions: _col0 (type: string)
                         auto parallelism: true
@@ -656,23 +656,23 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col1 (type: string)
                 outputColumnNames: _col1, _col2
-                Position of Big Table: 1
-                Statistics: Num rows: 1 Data size: 268 Basic stats: COMPLETE 
Column stats: PARTIAL
+                Position of Big Table: 0
+                Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE 
Column stats: COMPLETE
                 Select Operator
                   expressions: _col2 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 268 Basic stats: COMPLETE 
Column stats: PARTIAL
+                  Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE 
Column stats: COMPLETE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: string), 
'2011-10-15' (type: string)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 2 Data size: 736 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 2 Data size: 544 Basic stats: 
COMPLETE Column stats: COMPLETE
                     File Output Operator
                       bucketingVersion: 2
                       compressed: false
                       GlobalTableId: 0
 #### A masked pattern was here ####
                       NumFilesPerFileSink: 1
-                      Statistics: Num rows: 2 Data size: 736 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 2 Data size: 544 Basic stats: 
COMPLETE Column stats: COMPLETE
 #### A masked pattern was here ####
                       table:
                           input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
diff --git 
a/ql/src/test/results/clientpositive/llap/rand_partitionpruner2.q.out 
b/ql/src/test/results/clientpositive/llap/rand_partitionpruner2.q.out
index 5f4cc0c..081c7da 100644
--- a/ql/src/test/results/clientpositive/llap/rand_partitionpruner2.q.out
+++ b/ql/src/test/results/clientpositive/llap/rand_partitionpruner2.q.out
@@ -24,7 +24,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Output: default@tmptable_n1
 OPTIMIZED SQL: SELECT `key`, `value`, CAST('2008-04-08' AS STRING) AS `ds`, 
`hr`
 FROM `default`.`srcpart`
-WHERE RAND(1) < 0.1 AND `ds` = '2008-04-08'
+WHERE `ds` = '2008-04-08' AND RAND(1) < 0.1
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
diff --git 
a/ql/src/test/results/clientpositive/llap/rand_partitionpruner3.q.out 
b/ql/src/test/results/clientpositive/llap/rand_partitionpruner3.q.out
index 7f3aae3..e3e3d87 100644
--- a/ql/src/test/results/clientpositive/llap/rand_partitionpruner3.q.out
+++ b/ql/src/test/results/clientpositive/llap/rand_partitionpruner3.q.out
@@ -10,7 +10,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 OPTIMIZED SQL: SELECT `key`, `value`, CAST('2008-04-08' AS STRING) AS `ds`, 
`hr`
 FROM `default`.`srcpart`
-WHERE CAST(`key` AS DOUBLE) BETWEEN 10 AND 50 AND RAND(1) < 0.1 AND `ds` = 
'2008-04-08' AND `hr` LIKE '%2'
+WHERE CAST(`key` AS DOUBLE) BETWEEN 10 AND 50 AND `ds` = '2008-04-08' AND 
RAND(1) < 0.1 AND `hr` LIKE '%2'
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
diff --git a/ql/src/test/results/clientpositive/llap/union22.q.out 
b/ql/src/test/results/clientpositive/llap/union22.q.out
index 7c09259..4e69602 100644
--- a/ql/src/test/results/clientpositive/llap/union22.q.out
+++ b/ql/src/test/results/clientpositive/llap/union22.q.out
@@ -90,7 +90,7 @@ FROM `default`.`dst_union22`
 WHERE `k1` > 20) AS `t2`
 LEFT JOIN (SELECT `k1`, `k3`, `k4`
 FROM `default`.`dst_union22_delta`
-WHERE `k0` > 50 AND `k1` > 20 AND `ds` = '1') AS `t4` ON `t2`.`k1` = `t4`.`k1` 
AND `t2`.`=`
+WHERE `k0` > 50 AND `k1` > 20 AND `ds` = '1') AS `t4` ON `t2`.`=` AND 
`t2`.`k1` = `t4`.`k1`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
diff --git a/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out 
b/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out
index 3d7d4de..23a6464 100644
--- a/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out
@@ -3909,7 +3909,7 @@ FROM `default`.`dst_union22_n0`
 WHERE `k1` > 20) AS `t2`
 LEFT JOIN (SELECT `k1`, `k3`, `k4`
 FROM `default`.`dst_union22_delta_n0`
-WHERE `k0` > 50 AND `k1` > 20 AND `ds` = '1') AS `t4` ON `t2`.`k1` = `t4`.`k1` 
AND `t2`.`=`)
+WHERE `k0` > 50 AND `k1` > 20 AND `ds` = '1') AS `t4` ON `t2`.`=` AND 
`t2`.`k1` = `t4`.`k1`)
 GROUP BY `k1`, `k2`, `k3`, `k4`
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out 
b/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
index 6164d9a..f435127 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
@@ -960,7 +960,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: vector_interval_2
-                  filterExpr: ((CAST( str1 AS INTERVAL YEAR TO MONTH) <= 
INTERVAL'1-3') and (CAST( str1 AS INTERVAL YEAR TO MONTH) < INTERVAL'1-3') and 
(CAST( str2 AS INTERVAL YEAR TO MONTH) >= INTERVAL'1-2') and (CAST( str2 AS 
INTERVAL YEAR TO MONTH) > INTERVAL'1-2') and (INTERVAL'1-2' <= CAST( str2 AS 
INTERVAL YEAR TO MONTH)) and (INTERVAL'1-2' < CAST( str2 AS INTERVAL YEAR TO 
MONTH)) and (INTERVAL'1-3' >= CAST( str1 AS INTERVAL YEAR TO MONTH)) and 
(INTERVAL'1-3' > CAST( str1 AS INTERV [...]
+                  filterExpr: ((CAST( str1 AS INTERVAL YEAR TO MONTH) <= 
INTERVAL'1-3') and (CAST( str1 AS INTERVAL YEAR TO MONTH) < INTERVAL'1-3') and 
(CAST( str2 AS INTERVAL YEAR TO MONTH) >= INTERVAL'1-2') and (CAST( str2 AS 
INTERVAL YEAR TO MONTH) > INTERVAL'1-2') and (INTERVAL'1-2' <= CAST( str2 AS 
INTERVAL YEAR TO MONTH)) and (INTERVAL'1-2' < CAST( str2 AS INTERVAL YEAR TO 
MONTH)) and (INTERVAL'1-3' >= CAST( str1 AS INTERVAL YEAR TO MONTH)) and 
(INTERVAL'1-3' > CAST( str1 AS INTERV [...]
                   Statistics: Num rows: 2 Data size: 428 Basic stats: COMPLETE 
Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
@@ -968,8 +968,8 @@ STAGE PLANS:
                     Filter Vectorization:
                         className: VectorFilterOperator
                         native: true
-                        predicateExpression: FilterExprAndExpr(children: 
FilterIntervalYearMonthColLessEqualIntervalYearMonthScalar(col 
7:interval_year_month, val 15)(children: CastStringToIntervalYearMonth(col 
2:string) -> 7:interval_year_month), 
FilterIntervalYearMonthColLessIntervalYearMonthScalar(col 
8:interval_year_month, val 15)(children: CastStringToIntervalYearMonth(col 
2:string) -> 8:interval_year_month), 
FilterIntervalYearMonthColGreaterEqualIntervalYearMonthScalar(col 9:interv [...]
-                    predicate: ((CAST( str1 AS INTERVAL YEAR TO MONTH) <= 
INTERVAL'1-3') and (CAST( str1 AS INTERVAL YEAR TO MONTH) < INTERVAL'1-3') and 
(CAST( str2 AS INTERVAL YEAR TO MONTH) >= INTERVAL'1-2') and (CAST( str2 AS 
INTERVAL YEAR TO MONTH) > INTERVAL'1-2') and (INTERVAL'1-2' <= CAST( str2 AS 
INTERVAL YEAR TO MONTH)) and (INTERVAL'1-2' < CAST( str2 AS INTERVAL YEAR TO 
MONTH)) and (INTERVAL'1-3' >= CAST( str1 AS INTERVAL YEAR TO MONTH)) and 
(INTERVAL'1-3' > CAST( str1 AS INTER [...]
+                        predicateExpression: FilterExprAndExpr(children: 
FilterIntervalYearMonthColLessEqualIntervalYearMonthScalar(col 
7:interval_year_month, val 15)(children: CastStringToIntervalYearMonth(col 
2:string) -> 7:interval_year_month), 
FilterIntervalYearMonthColLessIntervalYearMonthScalar(col 
8:interval_year_month, val 15)(children: CastStringToIntervalYearMonth(col 
2:string) -> 8:interval_year_month), 
FilterIntervalYearMonthColGreaterEqualIntervalYearMonthScalar(col 9:interv [...]
+                    predicate: ((CAST( str1 AS INTERVAL YEAR TO MONTH) <= 
INTERVAL'1-3') and (CAST( str1 AS INTERVAL YEAR TO MONTH) < INTERVAL'1-3') and 
(CAST( str2 AS INTERVAL YEAR TO MONTH) >= INTERVAL'1-2') and (CAST( str2 AS 
INTERVAL YEAR TO MONTH) > INTERVAL'1-2') and (INTERVAL'1-2' <= CAST( str2 AS 
INTERVAL YEAR TO MONTH)) and (INTERVAL'1-2' < CAST( str2 AS INTERVAL YEAR TO 
MONTH)) and (INTERVAL'1-3' >= CAST( str1 AS INTERVAL YEAR TO MONTH)) and 
(INTERVAL'1-3' > CAST( str1 AS INTER [...]
                     Statistics: Num rows: 1 Data size: 214 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ts (type: timestamp)
@@ -1160,7 +1160,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: vector_interval_2
-                  filterExpr: ((CAST( str3 AS INTERVAL DAY TO SECOND) <= 
INTERVAL'1 02:03:05.000000000') and (CAST( str3 AS INTERVAL DAY TO SECOND) < 
INTERVAL'1 02:03:05.000000000') and (CAST( str4 AS INTERVAL DAY TO SECOND) >= 
INTERVAL'1 02:03:04.000000000') and (CAST( str4 AS INTERVAL DAY TO SECOND) > 
INTERVAL'1 02:03:04.000000000') and (INTERVAL'1 02:03:04.000000000' <= CAST( 
str4 AS INTERVAL DAY TO SECOND)) and (INTERVAL'1 02:03:04.000000000' < CAST( 
str4 AS INTERVAL DAY TO SECOND))  [...]
+                  filterExpr: ((CAST( str3 AS INTERVAL DAY TO SECOND) <= 
INTERVAL'1 02:03:05.000000000') and (CAST( str3 AS INTERVAL DAY TO SECOND) < 
INTERVAL'1 02:03:05.000000000') and (CAST( str4 AS INTERVAL DAY TO SECOND) >= 
INTERVAL'1 02:03:04.000000000') and (CAST( str4 AS INTERVAL DAY TO SECOND) > 
INTERVAL'1 02:03:04.000000000') and (INTERVAL'1 02:03:04.000000000' <= CAST( 
str4 AS INTERVAL DAY TO SECOND)) and (INTERVAL'1 02:03:04.000000000' < CAST( 
str4 AS INTERVAL DAY TO SECOND))  [...]
                   Statistics: Num rows: 2 Data size: 444 Basic stats: COMPLETE 
Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
@@ -1168,8 +1168,8 @@ STAGE PLANS:
                     Filter Vectorization:
                         className: VectorFilterOperator
                         native: true
-                        predicateExpression: FilterExprAndExpr(children: 
FilterIntervalDayTimeColLessEqualIntervalDayTimeScalar(col 7:interval_day_time, 
val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4:string) 
-> 7:interval_day_time), FilterIntervalDayTimeColLessIntervalDayTimeScalar(col 
8:interval_day_time, val 1 02:03:05.000000000)(children: 
CastStringToIntervalDayTime(col 4:string) -> 8:interval_day_time), 
FilterIntervalDayTimeColGreaterEqualIntervalDayTimeScalar( [...]
-                    predicate: ((CAST( str3 AS INTERVAL DAY TO SECOND) <= 
INTERVAL'1 02:03:05.000000000') and (CAST( str3 AS INTERVAL DAY TO SECOND) < 
INTERVAL'1 02:03:05.000000000') and (CAST( str4 AS INTERVAL DAY TO SECOND) >= 
INTERVAL'1 02:03:04.000000000') and (CAST( str4 AS INTERVAL DAY TO SECOND) > 
INTERVAL'1 02:03:04.000000000') and (INTERVAL'1 02:03:04.000000000' <= CAST( 
str4 AS INTERVAL DAY TO SECOND)) and (INTERVAL'1 02:03:04.000000000' < CAST( 
str4 AS INTERVAL DAY TO SECOND)) [...]
+                        predicateExpression: FilterExprAndExpr(children: 
FilterIntervalDayTimeColLessEqualIntervalDayTimeScalar(col 7:interval_day_time, 
val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4:string) 
-> 7:interval_day_time), FilterIntervalDayTimeColLessIntervalDayTimeScalar(col 
8:interval_day_time, val 1 02:03:05.000000000)(children: 
CastStringToIntervalDayTime(col 4:string) -> 8:interval_day_time), 
FilterIntervalDayTimeColGreaterEqualIntervalDayTimeScalar( [...]
+                    predicate: ((CAST( str3 AS INTERVAL DAY TO SECOND) <= 
INTERVAL'1 02:03:05.000000000') and (CAST( str3 AS INTERVAL DAY TO SECOND) < 
INTERVAL'1 02:03:05.000000000') and (CAST( str4 AS INTERVAL DAY TO SECOND) >= 
INTERVAL'1 02:03:04.000000000') and (CAST( str4 AS INTERVAL DAY TO SECOND) > 
INTERVAL'1 02:03:04.000000000') and (INTERVAL'1 02:03:04.000000000' <= CAST( 
str4 AS INTERVAL DAY TO SECOND)) and (INTERVAL'1 02:03:04.000000000' < CAST( 
str4 AS INTERVAL DAY TO SECOND)) [...]
                     Statistics: Num rows: 1 Data size: 222 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ts (type: timestamp)
@@ -1350,7 +1350,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: vector_interval_2
-                  filterExpr: ((DATE'2002-03-01' = (dt + CAST( str1 AS 
INTERVAL YEAR TO MONTH))) and DATE'2002-03-01' BETWEEN (dt + CAST( str1 AS 
INTERVAL YEAR TO MONTH)) AND (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) and 
((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) = DATE'2002-03-01') and (dt + 
CAST( str1 AS INTERVAL YEAR TO MONTH)) BETWEEN DATE'2002-03-01' AND 
DATE'2002-03-01' and (dt <> (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and 
(DATE'2002-03-01' = (dt + INTERVAL'1-2')) and  [...]
+                  filterExpr: (((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) = 
DATE'2002-03-01') and ((dt + INTERVAL'1-2') = DATE'2002-03-01') and 
DATE'2002-03-01' BETWEEN (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) AND (dt + 
CAST( str1 AS INTERVAL YEAR TO MONTH)) and (dt + CAST( str1 AS INTERVAL YEAR TO 
MONTH)) BETWEEN DATE'2002-03-01' AND DATE'2002-03-01' and (dt <> (dt + CAST( 
str1 AS INTERVAL YEAR TO MONTH))) and (dt + INTERVAL'1-2') BETWEEN 
DATE'2002-03-01' AND DATE'2002-03-01' and [...]
                   Statistics: Num rows: 2 Data size: 366 Basic stats: COMPLETE 
Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
@@ -1358,8 +1358,8 @@ STAGE PLANS:
                     Filter Vectorization:
                         className: VectorFilterOperator
                         native: true
-                        predicateExpression: FilterExprAndExpr(children: 
FilterDateScalarEqualDateColumn(val 11747, col 8:date)(children: 
DateColAddIntervalYearMonthColumn(col 1:date, col 
7:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 
7:interval_year_month) -> 8:date), SelectColumnIsTrue(col 13:boolean)(children: 
VectorUDFAdaptor(DATE'2002-03-01' BETWEEN (dt + CAST( str1 AS INTERVAL YEAR TO 
MONTH)) AND (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)))(childre [...]
-                    predicate: ((DATE'2002-03-01' = (dt + CAST( str1 AS 
INTERVAL YEAR TO MONTH))) and DATE'2002-03-01' BETWEEN (dt + CAST( str1 AS 
INTERVAL YEAR TO MONTH)) AND (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) and 
((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) = DATE'2002-03-01') and (dt + 
CAST( str1 AS INTERVAL YEAR TO MONTH)) BETWEEN DATE'2002-03-01' AND 
DATE'2002-03-01' and (dt <> (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and 
(DATE'2002-03-01' = (dt + INTERVAL'1-2')) and [...]
+                        predicateExpression: FilterExprAndExpr(children: 
FilterDateColEqualDateScalar(col 8:date, val 11747)(children: 
DateColAddIntervalYearMonthColumn(col 1:date, col 
7:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 
7:interval_year_month) -> 8:date), FilterDateColEqualDateScalar(col 9:date, val 
11747)(children: DateColAddIntervalYearMonthScalar(col 1:date, val 1-2) -> 
9:date), SelectColumnIsTrue(col 14:boolean)(children: VectorUDFAdaptor(D [...]
+                    predicate: (((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) 
= DATE'2002-03-01') and ((dt + INTERVAL'1-2') = DATE'2002-03-01') and 
DATE'2002-03-01' BETWEEN (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) AND (dt + 
CAST( str1 AS INTERVAL YEAR TO MONTH)) and (dt + CAST( str1 AS INTERVAL YEAR TO 
MONTH)) BETWEEN DATE'2002-03-01' AND DATE'2002-03-01' and (dt <> (dt + CAST( 
str1 AS INTERVAL YEAR TO MONTH))) and (dt + INTERVAL'1-2') BETWEEN 
DATE'2002-03-01' AND DATE'2002-03-01' an [...]
                     Statistics: Num rows: 1 Data size: 183 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ts (type: timestamp)
@@ -1540,7 +1540,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: vector_interval_2
-                  filterExpr: ((TIMESTAMP'2002-02-01 01:02:03' < (ts + 
INTERVAL'1-2')) and (TIMESTAMP'2002-04-01 01:02:03' > (ts + INTERVAL'1-2')) and 
((ts + INTERVAL'1-2') > TIMESTAMP'2002-02-01 01:02:03') and ((ts + 
INTERVAL'1-2') < TIMESTAMP'2002-04-01 01:02:03') and (ts < (ts + 
INTERVAL'1-0')) and (ts > (ts - INTERVAL'1-0')) and (TIMESTAMP'2002-03-01 
01:02:03' = (ts + INTERVAL'1-2')) and TIMESTAMP'2002-03-01 01:02:03' BETWEEN 
(ts + INTERVAL'1-2') AND (ts + INTERVAL'1-2') and (TIMESTA [...]
+                  filterExpr: ((TIMESTAMP'2002-02-01 01:02:03' < (ts + 
INTERVAL'1-2')) and (TIMESTAMP'2002-04-01 01:02:03' > (ts + INTERVAL'1-2')) and 
((ts + INTERVAL'1-2') > TIMESTAMP'2002-02-01 01:02:03') and ((ts + 
INTERVAL'1-2') < TIMESTAMP'2002-04-01 01:02:03') and (ts < (ts + 
INTERVAL'1-0')) and (ts > (ts - INTERVAL'1-0')) and ((ts + INTERVAL'1-2') = 
TIMESTAMP'2002-03-01 01:02:03') and TIMESTAMP'2002-03-01 01:02:03' BETWEEN (ts 
+ INTERVAL'1-2') AND (ts + INTERVAL'1-2') and (TIMESTA [...]
                   Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE 
Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
@@ -1548,8 +1548,8 @@ STAGE PLANS:
                     Filter Vectorization:
                         className: VectorFilterOperator
                         native: true
-                        predicateExpression: FilterExprAndExpr(children: 
FilterTimestampScalarLessTimestampColumn(val 2002-02-01 01:02:03, col 
7:timestamp)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, 
val 1-2) -> 7:timestamp), FilterTimestampScalarGreaterTimestampColumn(val 
2002-04-01 01:02:03, col 8:timestamp)(children: 
TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 
8:timestamp), FilterTimestampColGreaterTimestampScalar(col 9:timestamp, val 
2002 [...]
-                    predicate: ((TIMESTAMP'2002-02-01 01:02:03' < (ts + 
INTERVAL'1-2')) and (TIMESTAMP'2002-04-01 01:02:03' > (ts + INTERVAL'1-2')) and 
((ts + INTERVAL'1-2') > TIMESTAMP'2002-02-01 01:02:03') and ((ts + 
INTERVAL'1-2') < TIMESTAMP'2002-04-01 01:02:03') and (ts < (ts + 
INTERVAL'1-0')) and (ts > (ts - INTERVAL'1-0')) and (TIMESTAMP'2002-03-01 
01:02:03' = (ts + INTERVAL'1-2')) and TIMESTAMP'2002-03-01 01:02:03' BETWEEN 
(ts + INTERVAL'1-2') AND (ts + INTERVAL'1-2') and (TIMEST [...]
+                        predicateExpression: FilterExprAndExpr(children: 
FilterTimestampScalarLessTimestampColumn(val 2002-02-01 01:02:03, col 
7:timestamp)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, 
val 1-2) -> 7:timestamp), FilterTimestampScalarGreaterTimestampColumn(val 
2002-04-01 01:02:03, col 8:timestamp)(children: 
TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 
8:timestamp), FilterTimestampColGreaterTimestampScalar(col 9:timestamp, val 
2002 [...]
+                    predicate: ((TIMESTAMP'2002-02-01 01:02:03' < (ts + 
INTERVAL'1-2')) and (TIMESTAMP'2002-04-01 01:02:03' > (ts + INTERVAL'1-2')) and 
((ts + INTERVAL'1-2') > TIMESTAMP'2002-02-01 01:02:03') and ((ts + 
INTERVAL'1-2') < TIMESTAMP'2002-04-01 01:02:03') and (ts < (ts + 
INTERVAL'1-0')) and (ts > (ts - INTERVAL'1-0')) and ((ts + INTERVAL'1-2') = 
TIMESTAMP'2002-03-01 01:02:03') and TIMESTAMP'2002-03-01 01:02:03' BETWEEN (ts 
+ INTERVAL'1-2') AND (ts + INTERVAL'1-2') and (TIMEST [...]
                     Statistics: Num rows: 1 Data size: 40 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ts (type: timestamp)
@@ -1740,7 +1740,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: vector_interval_2
-                  filterExpr: ((TIMESTAMP'2001-01-01 01:02:03' < (dt + 
INTERVAL'0 01:02:04.000000000')) and (TIMESTAMP'2001-01-01 01:02:03' > (dt - 
INTERVAL'0 01:02:04.000000000')) and ((dt + INTERVAL'0 01:02:04.000000000') > 
TIMESTAMP'2001-01-01 01:02:03') and ((dt - INTERVAL'0 01:02:04.000000000') < 
TIMESTAMP'2001-01-01 01:02:03') and (ts < (dt + INTERVAL'0 
01:02:04.000000000')) and (ts > (dt - INTERVAL'0 01:02:04.000000000')) and 
(TIMESTAMP'2001-01-01 01:02:03' = (dt + INTERVAL'0 01:0 [...]
+                  filterExpr: ((TIMESTAMP'2001-01-01 01:02:03' < (dt + 
INTERVAL'0 01:02:04.000000000')) and (TIMESTAMP'2001-01-01 01:02:03' > (dt - 
INTERVAL'0 01:02:04.000000000')) and ((dt + INTERVAL'0 01:02:04.000000000') > 
TIMESTAMP'2001-01-01 01:02:03') and ((dt - INTERVAL'0 01:02:04.000000000') < 
TIMESTAMP'2001-01-01 01:02:03') and (ts < (dt + INTERVAL'0 
01:02:04.000000000')) and (ts > (dt - INTERVAL'0 01:02:04.000000000')) and ((dt 
+ INTERVAL'0 01:02:03.000000000') = TIMESTAMP'2001 [...]
                   Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE 
Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
@@ -1748,8 +1748,8 @@ STAGE PLANS:
                     Filter Vectorization:
                         className: VectorFilterOperator
                         native: true
-                        predicateExpression: FilterExprAndExpr(children: 
FilterTimestampScalarLessTimestampColumn(val 2001-01-01 01:02:03, col 
7:timestamp)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 
01:02:04.000000000) -> 7:timestamp), 
FilterTimestampScalarGreaterTimestampColumn(val 2001-01-01 01:02:03, col 
8:timestamp)(children: DateColSubtractIntervalDayTimeScalar(col 1:date, val 0 
01:02:04.000000000) -> 8:timestamp), 
FilterTimestampColGreaterTimestampScalar(col 9:time [...]
-                    predicate: ((TIMESTAMP'2001-01-01 01:02:03' < (dt + 
INTERVAL'0 01:02:04.000000000')) and (TIMESTAMP'2001-01-01 01:02:03' > (dt - 
INTERVAL'0 01:02:04.000000000')) and ((dt + INTERVAL'0 01:02:04.000000000') > 
TIMESTAMP'2001-01-01 01:02:03') and ((dt - INTERVAL'0 01:02:04.000000000') < 
TIMESTAMP'2001-01-01 01:02:03') and (ts < (dt + INTERVAL'0 
01:02:04.000000000')) and (ts > (dt - INTERVAL'0 01:02:04.000000000')) and 
(TIMESTAMP'2001-01-01 01:02:03' = (dt + INTERVAL'0 01: [...]
+                        predicateExpression: FilterExprAndExpr(children: 
FilterTimestampScalarLessTimestampColumn(val 2001-01-01 01:02:03, col 
7:timestamp)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 
01:02:04.000000000) -> 7:timestamp), 
FilterTimestampScalarGreaterTimestampColumn(val 2001-01-01 01:02:03, col 
8:timestamp)(children: DateColSubtractIntervalDayTimeScalar(col 1:date, val 0 
01:02:04.000000000) -> 8:timestamp), 
FilterTimestampColGreaterTimestampScalar(col 9:time [...]
+                    predicate: ((TIMESTAMP'2001-01-01 01:02:03' < (dt + 
INTERVAL'0 01:02:04.000000000')) and (TIMESTAMP'2001-01-01 01:02:03' > (dt - 
INTERVAL'0 01:02:04.000000000')) and ((dt + INTERVAL'0 01:02:04.000000000') > 
TIMESTAMP'2001-01-01 01:02:03') and ((dt - INTERVAL'0 01:02:04.000000000') < 
TIMESTAMP'2001-01-01 01:02:03') and (ts < (dt + INTERVAL'0 
01:02:04.000000000')) and (ts > (dt - INTERVAL'0 01:02:04.000000000')) and ((dt 
+ INTERVAL'0 01:02:03.000000000') = TIMESTAMP'200 [...]
                     Statistics: Num rows: 1 Data size: 96 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ts (type: timestamp)
@@ -1940,7 +1940,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: vector_interval_2
-                  filterExpr: ((TIMESTAMP'2001-01-01 01:02:03' < (ts + 
INTERVAL'1 00:00:00.000000000')) and (TIMESTAMP'2001-01-01 01:02:03' > (ts - 
INTERVAL'1 00:00:00.000000000')) and ((ts + INTERVAL'1 00:00:00.000000000') > 
TIMESTAMP'2001-01-01 01:02:03') and ((ts - INTERVAL'1 00:00:00.000000000') < 
TIMESTAMP'2001-01-01 01:02:03') and (ts < (ts + INTERVAL'1 
00:00:00.000000000')) and (ts > (ts - INTERVAL'1 00:00:00.000000000')) and 
(TIMESTAMP'2001-01-01 01:02:03' = (ts + INTERVAL'0 00:0 [...]
+                  filterExpr: ((TIMESTAMP'2001-01-01 01:02:03' < (ts + 
INTERVAL'1 00:00:00.000000000')) and (TIMESTAMP'2001-01-01 01:02:03' > (ts - 
INTERVAL'1 00:00:00.000000000')) and ((ts + INTERVAL'1 00:00:00.000000000') > 
TIMESTAMP'2001-01-01 01:02:03') and ((ts - INTERVAL'1 00:00:00.000000000') < 
TIMESTAMP'2001-01-01 01:02:03') and (ts < (ts + INTERVAL'1 
00:00:00.000000000')) and (ts > (ts - INTERVAL'1 00:00:00.000000000')) and ((ts 
+ INTERVAL'0 00:00:00.000000000') = TIMESTAMP'2001 [...]
                   Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE 
Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
@@ -1948,8 +1948,8 @@ STAGE PLANS:
                     Filter Vectorization:
                         className: VectorFilterOperator
                         native: true
-                        predicateExpression: FilterExprAndExpr(children: 
FilterTimestampScalarLessTimestampColumn(val 2001-01-01 01:02:03, col 
7:timestamp)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, 
val 1 00:00:00.000000000) -> 7:timestamp), 
FilterTimestampScalarGreaterTimestampColumn(val 2001-01-01 01:02:03, col 
8:timestamp)(children: TimestampColSubtractIntervalDayTimeScalar(col 
0:timestamp, val 1 00:00:00.000000000) -> 8:timestamp), 
FilterTimestampColGreaterTimest [...]
-                    predicate: ((TIMESTAMP'2001-01-01 01:02:03' < (ts + 
INTERVAL'1 00:00:00.000000000')) and (TIMESTAMP'2001-01-01 01:02:03' > (ts - 
INTERVAL'1 00:00:00.000000000')) and ((ts + INTERVAL'1 00:00:00.000000000') > 
TIMESTAMP'2001-01-01 01:02:03') and ((ts - INTERVAL'1 00:00:00.000000000') < 
TIMESTAMP'2001-01-01 01:02:03') and (ts < (ts + INTERVAL'1 
00:00:00.000000000')) and (ts > (ts - INTERVAL'1 00:00:00.000000000')) and 
(TIMESTAMP'2001-01-01 01:02:03' = (ts + INTERVAL'0 00: [...]
+                        predicateExpression: FilterExprAndExpr(children: 
FilterTimestampScalarLessTimestampColumn(val 2001-01-01 01:02:03, col 
7:timestamp)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, 
val 1 00:00:00.000000000) -> 7:timestamp), 
FilterTimestampScalarGreaterTimestampColumn(val 2001-01-01 01:02:03, col 
8:timestamp)(children: TimestampColSubtractIntervalDayTimeScalar(col 
0:timestamp, val 1 00:00:00.000000000) -> 8:timestamp), 
FilterTimestampColGreaterTimest [...]
+                    predicate: ((TIMESTAMP'2001-01-01 01:02:03' < (ts + 
INTERVAL'1 00:00:00.000000000')) and (TIMESTAMP'2001-01-01 01:02:03' > (ts - 
INTERVAL'1 00:00:00.000000000')) and ((ts + INTERVAL'1 00:00:00.000000000') > 
TIMESTAMP'2001-01-01 01:02:03') and ((ts - INTERVAL'1 00:00:00.000000000') < 
TIMESTAMP'2001-01-01 01:02:03') and (ts < (ts + INTERVAL'1 
00:00:00.000000000')) and (ts > (ts - INTERVAL'1 00:00:00.000000000')) and ((ts 
+ INTERVAL'0 00:00:00.000000000') = TIMESTAMP'200 [...]
                     Statistics: Num rows: 1 Data size: 40 Basic stats: 
COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ts (type: timestamp)
diff --git a/ql/src/test/results/clientpositive/perf/tez/cbo_query74.q.out 
b/ql/src/test/results/clientpositive/perf/tez/cbo_query74.q.out
index d14c39a..47217f3 100644
--- a/ql/src/test/results/clientpositive/perf/tez/cbo_query74.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/cbo_query74.q.out
@@ -143,7 +143,7 @@ HiveSortLimit(sort0=[$2], sort1=[$0], sort2=[$1], 
dir0=[ASC], dir1=[ASC], dir2=[
                 HiveFilter(condition=[AND(IS NOT NULL($0), IS NOT NULL($3))])
                   HiveTableScan(table=[[default, store_sales]], 
table:alias=[store_sales])
               HiveProject(d_date_sk=[$0])
-                HiveFilter(condition=[AND(=($6, 1999), IN($6, 1998, 1999), IS 
NOT NULL($0))])
+                HiveFilter(condition=[AND(=($6, 1999), IS NOT NULL($0))])
                   HiveTableScan(table=[[default, date_dim]], 
table:alias=[date_dim])
       HiveJoin(condition=[=($2, $4)], joinType=[inner], algorithm=[none], 
cost=[not available])
         HiveJoin(condition=[=($2, $0)], joinType=[inner], algorithm=[none], 
cost=[not available])
@@ -158,7 +158,7 @@ HiveSortLimit(sort0=[$2], sort1=[$0], sort2=[$1], 
dir0=[ASC], dir1=[ASC], dir2=[
                     HiveFilter(condition=[AND(IS NOT NULL($4), IS NOT 
NULL($0))])
                       HiveTableScan(table=[[default, web_sales]], 
table:alias=[web_sales])
                   HiveProject(d_date_sk=[$0])
-                    HiveFilter(condition=[AND(=($6, 1999), IN($6, 1998, 1999), 
IS NOT NULL($0))])
+                    HiveFilter(condition=[AND(=($6, 1999), IS NOT NULL($0))])
                       HiveTableScan(table=[[default, date_dim]], 
table:alias=[date_dim])
           HiveProject($f0=[$0], $f4=[$3])
             HiveFilter(condition=[>($3, 0)])
@@ -172,7 +172,7 @@ HiveSortLimit(sort0=[$2], sort1=[$0], sort2=[$1], 
dir0=[ASC], dir1=[ASC], dir2=[
                       HiveFilter(condition=[AND(IS NOT NULL($0), IS NOT 
NULL($3))])
                         HiveTableScan(table=[[default, store_sales]], 
table:alias=[store_sales])
                     HiveProject(d_date_sk=[$0])
-                      HiveFilter(condition=[AND(=($6, 1998), IN($6, 1998, 
1999), IS NOT NULL($0))])
+                      HiveFilter(condition=[AND(=($6, 1998), IS NOT NULL($0))])
                         HiveTableScan(table=[[default, date_dim]], 
table:alias=[date_dim])
         HiveProject(customer_id=[$0], year_total=[$3], CAST=[CAST(IS NOT 
NULL($3)):BOOLEAN])
           HiveFilter(condition=[>($3, 0)])
@@ -186,6 +186,6 @@ HiveSortLimit(sort0=[$2], sort1=[$0], sort2=[$1], 
dir0=[ASC], dir1=[ASC], dir2=[
                     HiveFilter(condition=[AND(IS NOT NULL($4), IS NOT 
NULL($0))])
                       HiveTableScan(table=[[default, web_sales]], 
table:alias=[web_sales])
                   HiveProject(d_date_sk=[$0])
-                    HiveFilter(condition=[AND(=($6, 1998), IN($6, 1998, 1999), 
IS NOT NULL($0))])
+                    HiveFilter(condition=[AND(=($6, 1998), IS NOT NULL($0))])
                       HiveTableScan(table=[[default, date_dim]], 
table:alias=[date_dim])
 
diff --git 
a/ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query74.q.out 
b/ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query74.q.out
index 2574a76..0eb4b7c 100644
--- a/ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query74.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/constraints/cbo_query74.q.out
@@ -147,7 +147,7 @@ HiveSortLimit(sort0=[$2], sort1=[$0], sort2=[$1], 
dir0=[ASC], dir1=[ASC], dir2=[
                           HiveProject(ss_sold_date_sk=[$0], 
ss_customer_sk=[$3], ss_net_paid=[$20])
                             HiveTableScan(table=[[default, store_sales]], 
table:alias=[store_sales])
                       HiveProject(d_date_sk=[$0])
-                        HiveFilter(condition=[AND(=($1, 1999), IN($1, 1998, 
1999))])
+                        HiveFilter(condition=[=($1, 1999)])
                           HiveProject(d_date_sk=[$0], d_year=[$6])
                             HiveTableScan(table=[[default, date_dim]], 
table:alias=[date_dim])
               HiveProject($f0=[$0], $f1=[$1])
@@ -162,7 +162,7 @@ HiveSortLimit(sort0=[$2], sort1=[$0], sort2=[$1], 
dir0=[ASC], dir1=[ASC], dir2=[
                             HiveProject(ss_sold_date_sk=[$0], 
ss_customer_sk=[$3], ss_net_paid=[$20])
                               HiveTableScan(table=[[default, store_sales]], 
table:alias=[store_sales])
                         HiveProject(d_date_sk=[$0])
-                          HiveFilter(condition=[AND(=($1, 1998), IN($1, 1998, 
1999))])
+                          HiveFilter(condition=[=($1, 1998)])
                             HiveProject(d_date_sk=[$0], d_year=[$6])
                               HiveTableScan(table=[[default, date_dim]], 
table:alias=[date_dim])
             HiveProject(c_customer_id=[$0], $f1=[$1])
@@ -176,7 +176,7 @@ HiveSortLimit(sort0=[$2], sort1=[$0], sort2=[$1], 
dir0=[ASC], dir1=[ASC], dir2=[
                         HiveProject(ws_sold_date_sk=[$0], 
ws_bill_customer_sk=[$4], ws_net_paid=[$29])
                           HiveTableScan(table=[[default, web_sales]], 
table:alias=[web_sales])
                     HiveProject(d_date_sk=[$0])
-                      HiveFilter(condition=[AND(=($1, 1999), IN($1, 1998, 
1999))])
+                      HiveFilter(condition=[=($1, 1999)])
                         HiveProject(d_date_sk=[$0], d_year=[$6])
                           HiveTableScan(table=[[default, date_dim]], 
table:alias=[date_dim])
           HiveProject(customer_id=[$0], year_total=[$1], CAST=[CAST(IS NOT 
NULL($1)):BOOLEAN])
@@ -191,7 +191,7 @@ HiveSortLimit(sort0=[$2], sort1=[$0], sort2=[$1], 
dir0=[ASC], dir1=[ASC], dir2=[
                         HiveProject(ws_sold_date_sk=[$0], 
ws_bill_customer_sk=[$4], ws_net_paid=[$29])
                           HiveTableScan(table=[[default, web_sales]], 
table:alias=[web_sales])
                     HiveProject(d_date_sk=[$0])
-                      HiveFilter(condition=[AND(=($1, 1998), IN($1, 1998, 
1999))])
+                      HiveFilter(condition=[=($1, 1998)])
                         HiveProject(d_date_sk=[$0], d_year=[$6])
                           HiveTableScan(table=[[default, date_dim]], 
table:alias=[date_dim])
       HiveProject(c_customer_id=[$1], c_first_name=[$8], c_last_name=[$9])
diff --git 
a/ql/src/test/results/clientpositive/perf/tez/constraints/query74.q.out 
b/ql/src/test/results/clientpositive/perf/tez/constraints/query74.q.out
index 4c89bb3..c9f758b 100644
--- a/ql/src/test/results/clientpositive/perf/tez/constraints/query74.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/constraints/query74.q.out
@@ -224,7 +224,7 @@ Stage-0
                                                 Select Operator [SEL_339] 
(rows=652 width=4)
                                                   Output:["_col0"]
                                                   Filter Operator [FIL_335] 
(rows=652 width=8)
-                                                    predicate:((_col1 = 1998) 
and (_col1) IN (1998, 1999))
+                                                    predicate:(_col1 = 1998)
                                                     Select Operator [SEL_331] 
(rows=73049 width=8)
                                                       Output:["_col0","_col1"]
                                                       TableScan [TS_4] 
(rows=73049 width=8)
@@ -283,7 +283,7 @@ Stage-0
                                                 Select Operator [SEL_338] 
(rows=652 width=4)
                                                   Output:["_col0"]
                                                   Filter Operator [FIL_334] 
(rows=652 width=8)
-                                                    predicate:((_col1 = 1999) 
and (_col1) IN (1998, 1999))
+                                                    predicate:(_col1 = 1999)
                                                      Please refer to the 
previous Select Operator [SEL_331]
                                             <-Map 26 [SIMPLE_EDGE] vectorized
                                               SHUFFLE [RS_385]
@@ -341,7 +341,7 @@ Stage-0
                                                       Select Operator 
[SEL_337] (rows=652 width=4)
                                                         Output:["_col0"]
                                                         Filter Operator 
[FIL_333] (rows=652 width=8)
-                                                          predicate:((_col1 = 
1998) and (_col1) IN (1998, 1999))
+                                                          predicate:(_col1 = 
1998)
                                                            Please refer to the 
previous Select Operator [SEL_331]
                                                   <-Map 25 [SIMPLE_EDGE] 
vectorized
                                                     SHUFFLE [RS_376]
@@ -392,7 +392,7 @@ Stage-0
                                                     Select Operator [SEL_336] 
(rows=652 width=4)
                                                       Output:["_col0"]
                                                       Filter Operator 
[FIL_332] (rows=652 width=8)
-                                                        predicate:((_col1 = 
1999) and (_col1) IN (1998, 1999))
+                                                        predicate:(_col1 = 
1999)
                                                          Please refer to the 
previous Select Operator [SEL_331]
                                                 <-Map 1 [SIMPLE_EDGE] 
vectorized
                                                   SHUFFLE [RS_361]
diff --git a/ql/src/test/results/clientpositive/perf/tez/query74.q.out 
b/ql/src/test/results/clientpositive/perf/tez/query74.q.out
index 831ade0..2d1ff69 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query74.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query74.q.out
@@ -208,7 +208,7 @@ Stage-0
                                       Select Operator [SEL_292] (rows=652 
width=4)
                                         Output:["_col0"]
                                         Filter Operator [FIL_289] (rows=652 
width=8)
-                                          predicate:((d_year = 1999) and 
(d_year) IN (1998, 1999) and d_date_sk is not null)
+                                          predicate:((d_year = 1999) and 
d_date_sk is not null)
                                           TableScan [TS_65] (rows=73049 
width=8)
                                             
default@date_dim,date_dim,Tbl:COMPLETE,Col:COMPLETE,Output:["d_date_sk","d_year"]
                                   <-Map 17 [SIMPLE_EDGE] vectorized
@@ -269,7 +269,7 @@ Stage-0
                                                 Select Operator [SEL_294] 
(rows=652 width=4)
                                                   Output:["_col0"]
                                                   Filter Operator [FIL_291] 
(rows=652 width=8)
-                                                    predicate:((d_year = 1998) 
and (d_year) IN (1998, 1999) and d_date_sk is not null)
+                                                    predicate:((d_year = 1998) 
and d_date_sk is not null)
                                                      Please refer to the 
previous TableScan [TS_65]
                                             <-Map 13 [SIMPLE_EDGE] vectorized
                                               SHUFFLE [RS_339]
@@ -329,7 +329,7 @@ Stage-0
                                                     Select Operator [SEL_293] 
(rows=652 width=4)
                                                       Output:["_col0"]
                                                       Filter Operator 
[FIL_290] (rows=652 width=8)
-                                                        predicate:((d_year = 
1998) and (d_year) IN (1998, 1999) and d_date_sk is not null)
+                                                        predicate:((d_year = 
1998) and d_date_sk is not null)
                                                          Please refer to the 
previous TableScan [TS_65]
                                                 <-Map 9 [SIMPLE_EDGE] 
vectorized
                                                   SHUFFLE [RS_329]
