Author: hashutosh
Date: Thu Dec 19 22:49:01 2013
New Revision: 1552449

URL: http://svn.apache.org/r1552449
Log:
HIVE-6052 : metastore JDO filter pushdown for integers may produce unexpected 
results with non-normalized integer columns (Sergey Shelukhin via Ashutosh 
Chauhan)
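
The root cause: the metastore stores every partition value as a string, so a
JDO filter pushed down on an integral partition column compares strings, and
string order only agrees with numeric order when the values are normalized.
A minimal illustration of the mismatch (plain Java, not part of this patch):

    public class IntegralCompareExample {
      public static void main(String[] args) {
        // Partition values live in the metastore as varchar, so a string-based
        // pushdown compares them as text, not as numbers.
        System.out.println("012".equals(String.valueOf(12))); // false, although 012 == 12
        System.out.println("9".compareTo("12") > 0);          // true as strings, but 9 < 12
      }
    }

Hence the new flag introduced below defaults to false, and integral pushdown
is only attempted when the user opts in.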

Modified:
    hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
    hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
    hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
    hive/trunk/ql/src/test/queries/clientpositive/alter_partition_coltype.q
    hive/trunk/ql/src/test/queries/clientpositive/annotate_stats_part.q
    hive/trunk/ql/src/test/queries/clientpositive/dynamic_partition_skip_default.q
    hive/trunk/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
    hive/trunk/ql/src/test/results/clientpositive/annotate_stats_part.q.out
    hive/trunk/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out

Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1552449&r1=1552448&r2=1552449&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Thu Dec 19 22:49:01 2013
@@ -340,6 +340,9 @@ public class HiveConf extends Configurat
     METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", false),
     METASTORE_PARTITION_NAME_WHITELIST_PATTERN(
         "hive.metastore.partition.name.whitelist.pattern", ""),
+    // Whether to enable integral JDO pushdown. For partition columns storing integers
+    // in non-canonical form (e.g. '012'), it may not work, so it's off by default.
+    METASTORE_INTEGER_JDO_PUSHDOWN("hive.metastore.integral.jdo.pushdown", false),
     METASTORE_TRY_DIRECT_SQL("hive.metastore.try.direct.sql", true),
     METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true),
     METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
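
For metastores where integral partition values are known to be stored in
canonical form, the flag can be turned back on. A sketch of opting in
programmatically (using the same HiveConf accessors this patch relies on):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class EnableIntegralPushdown {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Safe only if no integral partition value has leading zeros,
        // a '+' sign, or surrounding whitespace.
        conf.setBoolVar(HiveConf.ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN, true);
        System.out.println(HiveConf.getBoolVar(
            conf, HiveConf.ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN)); // true
      }
    }

Equivalently, set hive.metastore.integral.jdo.pushdown=true in hive-site.xml.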

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java?rev=1552449&r1=1552448&r2=1552449&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java Thu Dec 19 22:49:01 2013
@@ -745,9 +745,6 @@ class MetaStoreDirectSql {
         return;
       }
 
-      // Force string-based handling in some cases to be compatible with JDO pushdown.
-      boolean forceStringEq = !isStringCol && node.canJdoUseStringsWithIntegral();
-
       if (joins.isEmpty()) {
         // There's a fixed number of partition cols that we might have filters on. To avoid
         // joining multiple times for one column (if there are several filters on it), we will
@@ -765,16 +762,24 @@ class MetaStoreDirectSql {
 
       // Build the filter and add parameters linearly; we are traversing leaf nodes LTR.
       String tableValue = "\"FILTER" + partColIndex + "\".\"PART_KEY_VAL\"";
-      if (!isStringCol && !forceStringEq) {
+      if (node.isReverseOrder) {
+        params.add(node.value);
+      }
+      if (!isStringCol) {
         // The underlying database field is varchar; we need to compare numbers.
+        // Note that this won't work with __HIVE_DEFAULT_PARTITION__. It will fail and fall
+        // back to JDO. That is by design; we could add an ugly workaround here but didn't.
         tableValue = "cast(" + tableValue + " as decimal(21,0))";
+
         // This is a workaround for DERBY-6358; as such, it is pretty horrible.
         tableValue = "(case when \"TBLS\".\"TBL_NAME\" = ? and \"DBS\".\"NAME\" = ? then "
           + tableValue + " else null end)";
         params.add(table.getTableName().toLowerCase());
         params.add(table.getDbName().toLowerCase());
       }
-      params.add(forceStringEq ? node.value.toString() : node.value);
+      if (!node.isReverseOrder) {
+        params.add(node.value);
+      }
 
       filterBuffer.append(node.isReverseOrder
           ? "(? " + node.operator.getSqlOp() + " " + tableValue + ")"

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1552449&r1=1552448&r2=1552449&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Thu Dec 19 22:49:01 2013
@@ -2199,7 +2199,7 @@ public class ObjectStore implements RawS
       params.put("dbName", dbName);
     }
 
-    tree.generateJDOFilterFragment(table, params, queryBuilder);
+    tree.generateJDOFilterFragment(getConf(), table, params, queryBuilder);
     if (queryBuilder.hasError()) {
       assert !isValidatedFilter;
       LOG.info("JDO filter pushdown cannot be used: " + 
queryBuilder.getErrorMessage());

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java?rev=1552449&r1=1552448&r2=1552449&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java Thu Dec 19 22:49:01 2013
@@ -24,8 +24,9 @@ import java.util.Stack;
 
 import org.antlr.runtime.ANTLRStringStream;
 import org.antlr.runtime.CharStream;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.metastore.ObjectStore;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -231,12 +232,12 @@ public class ExpressionTree {
      * @return a JDO filter statement
      * @throws MetaException
      */
-    public void generateJDOFilter(Table table, Map<String, Object> params,
-        FilterBuilder filterBuffer) throws MetaException {
+    public void generateJDOFilter(Configuration conf, Table table,
+        Map<String, Object> params, FilterBuilder filterBuffer) throws MetaException {
       if (filterBuffer.hasError()) return;
       if (lhs != null) {
         filterBuffer.append (" (");
-        lhs.generateJDOFilter(table, params, filterBuffer);
+        lhs.generateJDOFilter(conf, table, params, filterBuffer);
 
         if (rhs != null) {
           if( andOr == LogicalOperator.AND ) {
@@ -245,7 +246,7 @@ public class ExpressionTree {
             filterBuffer.append(" || ");
           }
 
-          rhs.generateJDOFilter(table, params, filterBuffer);
+          rhs.generateJDOFilter(conf, table, params, filterBuffer);
         }
         filterBuffer.append (") ");
       }
@@ -269,10 +270,10 @@ public class ExpressionTree {
     }
 
     @Override
-    public void generateJDOFilter(Table table, Map<String, Object> params,
+    public void generateJDOFilter(Configuration conf, Table table, Map<String, Object> params,
         FilterBuilder filterBuilder) throws MetaException {
       if (table != null) {
-        generateJDOFilterOverPartitions(table, params, filterBuilder);
+        generateJDOFilterOverPartitions(conf, table, params, filterBuilder);
       } else {
         generateJDOFilterOverTables(params, filterBuilder);
       }
@@ -342,13 +343,16 @@ public class ExpressionTree {
       }
     }
 
-    private void generateJDOFilterOverPartitions(Table table, Map<String, Object> params,
-        FilterBuilder filterBuilder) throws MetaException {
+    private void generateJDOFilterOverPartitions(Configuration conf, Table table,
+        Map<String, Object> params,  FilterBuilder filterBuilder) throws MetaException {
       int partitionColumnCount = table.getPartitionKeys().size();
       int partitionColumnIndex = getPartColIndexForFilter(table, filterBuilder);
       if (filterBuilder.hasError()) return;
 
-      String valueAsString = getJdoFilterPushdownParam(table, partitionColumnIndex, filterBuilder);
+      boolean canPushDownIntegral =
+          HiveConf.getBoolVar(conf, HiveConf.ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN);
+      String valueAsString = getJdoFilterPushdownParam(
+          table, partitionColumnIndex, filterBuilder, canPushDownIntegral);
       if (filterBuilder.hasError()) return;
 
       String paramName = PARAM_PREFIX + params.size();
@@ -435,9 +439,9 @@ public class ExpressionTree {
      * @param filterBuilder filter builder used to report error, if any.
      * @return The parameter string.
      */
-    private String getJdoFilterPushdownParam(
-        Table table, int partColIndex, FilterBuilder filterBuilder) throws MetaException {
-      boolean isIntegralSupported = canJdoUseStringsWithIntegral();
+    private String getJdoFilterPushdownParam(Table table, int partColIndex,
+        FilterBuilder filterBuilder, boolean canPushDownIntegral) throws MetaException {
+      boolean isIntegralSupported = canPushDownIntegral && canJdoUseStringsWithIntegral();
       String colType = table.getPartitionKeys().get(partColIndex).getType();
       // Can only support partitions whose types are string, or maybe integers
       if (!colType.equals(serdeConstants.STRING_TYPE_NAME)
@@ -567,14 +571,14 @@ public class ExpressionTree {
    *     are the parameter values
    * @param filterBuilder the filter builder to append to.
    */
-  public void generateJDOFilterFragment(Table table, Map<String, Object> params,
-      FilterBuilder filterBuilder) throws MetaException {
+  public void generateJDOFilterFragment(Configuration conf, Table table,
+      Map<String, Object> params, FilterBuilder filterBuilder) throws MetaException {
     if (root == null) {
       return;
     }
 
     filterBuilder.append(" && ( ");
-    root.generateJDOFilter(table, params, filterBuilder);
+    root.generateJDOFilter(conf, table, params, filterBuilder);
     filterBuilder.append(" )");
   }
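
The canPushDownIntegral guard matters because JDO pushdown compares the stored
string directly: a value is only safe to compare that way if its string form
round-trips through the integral type unchanged. An illustrative check (not
part of this patch) of what canonical form means here:

    public class CanonicalIntegralCheck {
      static boolean isCanonical(String s) {
        try {
          // '012', '+12' and ' 12' all fail the round-trip; '12' passes.
          return String.valueOf(Long.parseLong(s)).equals(s);
        } catch (NumberFormatException e) {
          return false; // not an integral value at all
        }
      }

      public static void main(String[] args) {
        System.out.println(isCanonical("12"));  // true
        System.out.println(isCanonical("012")); // false
      }
    }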
 

Modified: hive/trunk/ql/src/test/queries/clientpositive/alter_partition_coltype.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/alter_partition_coltype.q?rev=1552449&r1=1552448&r2=1552449&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/alter_partition_coltype.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/alter_partition_coltype.q Thu Dec 19 22:49:01 2013
@@ -19,7 +19,7 @@ insert overwrite table alter_coltype par
 select count(*) from alter_coltype where dt = '100x';
 explain extended select count(*) from alter_coltype where dt = '100x';
 
-select count(*) from alter_coltype where dt = 100;
+select count(*) from alter_coltype where dt = '100';
 
 -- alter partition key column data type for ts column.
 alter table alter_coltype partition column (ts double);

Modified: hive/trunk/ql/src/test/queries/clientpositive/annotate_stats_part.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/annotate_stats_part.q?rev=1552449&r1=1552448&r2=1552449&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/annotate_stats_part.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/annotate_stats_part.q Thu Dec 19 22:49:01 2013
@@ -7,7 +7,7 @@ create table if not exists loc_staging (
   state string,
   locid int,
   zip bigint,
-  year int
+  year string
 ) row format delimited fields terminated by '|' stored as textfile;
 
 LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc_staging;
@@ -16,7 +16,7 @@ create table if not exists loc_orc (
   state string,
   locid int,
   zip bigint
-) partitioned by(year int) stored as orc;
+) partitioned by(year string) stored as orc;
 
 -- basicStatState: NONE colStatState: NONE
 explain extended select * from loc_orc;
@@ -29,7 +29,7 @@ insert overwrite table loc_orc partition
 explain extended select * from loc_orc;
 
 -- partition level analyze statistics for specific partition
-analyze table loc_orc partition(year=2001) compute statistics;
+analyze table loc_orc partition(year='2001') compute statistics;
 
 -- basicStatState: PARTIAL colStatState: NONE
 explain extended select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__';
@@ -38,7 +38,7 @@ explain extended select * from loc_orc w
 explain extended select * from loc_orc;
 
 -- basicStatState: COMPLETE colStatState: NONE
-explain extended select * from loc_orc where year=2001;
+explain extended select * from loc_orc where year='2001';
 
 -- partition level analyze statistics for all partitions
 analyze table loc_orc partition(year) compute statistics;
@@ -50,14 +50,14 @@ explain extended select * from loc_orc w
 explain extended select * from loc_orc;
 
 -- basicStatState: COMPLETE colStatState: NONE
-explain extended select * from loc_orc where year=2001 or year='__HIVE_DEFAULT_PARTITION__';
+explain extended select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__';
 
 -- both partitions will be pruned
 -- basicStatState: NONE colStatState: NONE
-explain extended select * from loc_orc where year=2001 and year='__HIVE_DEFAULT_PARTITION__';
+explain extended select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__';
 
 -- partition level partial column statistics
-analyze table loc_orc partition(year=2001) compute statistics for columns state,locid;
+analyze table loc_orc partition(year='2001') compute statistics for columns state,locid;
 
 -- basicStatState: COMPLETE colStatState: NONE
 explain extended select zip from loc_orc;
@@ -70,16 +70,16 @@ explain extended select state from loc_o
 explain extended select state,locid from loc_orc;
 
 -- basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select state,locid from loc_orc where year=2001;
+explain extended select state,locid from loc_orc where year='2001';
 
 -- basicStatState: COMPLETE colStatState: NONE
-explain extended select state,locid from loc_orc where year!=2001;
+explain extended select state,locid from loc_orc where year!='2001';
 
 -- basicStatState: COMPLETE colStatState: PARTIAL
 explain extended select * from loc_orc;
 
 -- This is to test filter expression evaluation on partition column
 -- numRows: 2 dataSize: 8 basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select locid from loc_orc where locid>0 and year=2001;
-explain extended select locid,year from loc_orc where locid>0 and year=2001;
-explain extended select * from (select locid,year from loc_orc) test where locid>0 and year=2001;
+explain extended select locid from loc_orc where locid>0 and year='2001';
+explain extended select locid,year from loc_orc where locid>0 and year='2001';
+explain extended select * from (select locid,year from loc_orc) test where locid>0 and year='2001';

Modified: hive/trunk/ql/src/test/queries/clientpositive/dynamic_partition_skip_default.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/dynamic_partition_skip_default.q?rev=1552449&r1=1552448&r2=1552449&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/dynamic_partition_skip_default.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/dynamic_partition_skip_default.q Thu Dec 19 22:49:01 2013
@@ -1,19 +1,19 @@
-create table dynamic_part_table(intcol int) partitioned by (partcol1 int, partcol2 int);
+create table dynamic_part_table(intcol string) partitioned by (partcol1 string, partcol2 string);
 
 set hive.exec.dynamic.partition.mode=nonstrict;
 
-insert into table dynamic_part_table partition(partcol1, partcol2) select 1, 1, 1 from src where key=150;
+insert into table dynamic_part_table partition(partcol1, partcol2) select '1', '1', '1' from src where key=150;
 
-insert into table dynamic_part_table partition(partcol1, partcol2) select 1, NULL, 1 from src where key=150;
+insert into table dynamic_part_table partition(partcol1, partcol2) select '1', NULL, '1' from src where key=150;
 
-insert into table dynamic_part_table partition(partcol1, partcol2) select 1, 1, NULL from src where key=150;
+insert into table dynamic_part_table partition(partcol1, partcol2) select '1', '1', NULL from src where key=150;
 
-insert into table dynamic_part_table partition(partcol1, partcol2) select 1, NULL, NULL from src where key=150;
+insert into table dynamic_part_table partition(partcol1, partcol2) select '1', NULL, NULL from src where key=150;
 
-explain extended select intcol from dynamic_part_table where partcol1=1 and partcol2=1;
+explain extended select intcol from dynamic_part_table where partcol1='1' and partcol2='1';
 
 set hive.exec.dynamic.partition.mode=strict;
 
-explain extended select intcol from dynamic_part_table where partcol1=1 and partcol2=1;
+explain extended select intcol from dynamic_part_table where partcol1='1' and partcol2='1';
 
-explain extended select intcol from dynamic_part_table where (partcol1=1 and partcol2=1)or (partcol1=1 and partcol2='__HIVE_DEFAULT_PARTITION__');
+explain extended select intcol from dynamic_part_table where (partcol1='1' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__');

Modified: hive/trunk/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/alter_partition_coltype.q.out?rev=1552449&r1=1552448&r2=1552449&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/alter_partition_coltype.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/alter_partition_coltype.q.out Thu Dec 19 22:49:01 2013
@@ -225,11 +225,11 @@ STAGE PLANS:
     Fetch Operator
       limit: -1
 
-PREHOOK: query: select count(*) from alter_coltype where dt = 100
+PREHOOK: query: select count(*) from alter_coltype where dt = '100'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_coltype
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from alter_coltype where dt = 100
+POSTHOOK: query: select count(*) from alter_coltype where dt = '100'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_coltype
 #### A masked pattern was here ####

Modified: hive/trunk/ql/src/test/results/clientpositive/annotate_stats_part.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/annotate_stats_part.q.out?rev=1552449&r1=1552448&r2=1552449&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/annotate_stats_part.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/annotate_stats_part.q.out Thu Dec 19 22:49:01 2013
@@ -2,14 +2,14 @@ PREHOOK: query: create table if not exis
   state string,
   locid int,
   zip bigint,
-  year int
+  year string
 ) row format delimited fields terminated by '|' stored as textfile
 PREHOOK: type: CREATETABLE
 POSTHOOK: query: create table if not exists loc_staging (
   state string,
   locid int,
   zip bigint,
-  year int
+  year string
 ) row format delimited fields terminated by '|' stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@loc_staging
@@ -23,13 +23,13 @@ PREHOOK: query: create table if not exis
   state string,
   locid int,
   zip bigint
-) partitioned by(year int) stored as orc
+) partitioned by(year string) stored as orc
 PREHOOK: type: CREATETABLE
 POSTHOOK: query: create table if not exists loc_orc (
   state string,
   locid int,
   zip bigint
-) partitioned by(year int) stored as orc
+) partitioned by(year string) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@loc_orc
 PREHOOK: query: -- basicStatState: NONE colStatState: NONE
@@ -211,14 +211,14 @@ STAGE PLANS:
             ListSink
 
 PREHOOK: query: -- partition level analyze statistics for specific partition
-analyze table loc_orc partition(year=2001) compute statistics
+analyze table loc_orc partition(year='2001') compute statistics
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc
 PREHOOK: Input: default@loc_orc@year=2001
 PREHOOK: Output: default@loc_orc
 PREHOOK: Output: default@loc_orc@year=2001
 POSTHOOK: query: -- partition level analyze statistics for specific partition
-analyze table loc_orc partition(year=2001) compute statistics
+analyze table loc_orc partition(year='2001') compute statistics
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc
 POSTHOOK: Input: default@loc_orc@year=2001
@@ -436,10 +436,10 @@ STAGE PLANS:
             ListSink
 
 PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE
-explain extended select * from loc_orc where year=2001
+explain extended select * from loc_orc where year='2001'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE
-explain extended select * from loc_orc where year=2001
+explain extended select * from loc_orc where year='2001'
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
@@ -448,7 +448,7 @@ POSTHOOK: Lineage: loc_orc PARTITION(yea
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (TOK_TABLE_OR_COL year) 2001))))
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (TOK_TABLE_OR_COL year) '2001'))))
 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -748,10 +748,10 @@ STAGE PLANS:
             ListSink
 
 PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE
-explain extended select * from loc_orc where year=2001 or year='__HIVE_DEFAULT_PARTITION__'
+explain extended select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE
-explain extended select * from loc_orc where year=2001 or year='__HIVE_DEFAULT_PARTITION__'
+explain extended select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__'
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
@@ -760,7 +760,7 @@ POSTHOOK: Lineage: loc_orc PARTITION(yea
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (or (= (TOK_TABLE_OR_COL year) 2001) (= (TOK_TABLE_OR_COL year) '__HIVE_DEFAULT_PARTITION__')))))
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (or (= (TOK_TABLE_OR_COL year) '2001') (= (TOK_TABLE_OR_COL year) '__HIVE_DEFAULT_PARTITION__')))))
 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -871,11 +871,11 @@ STAGE PLANS:
 
 PREHOOK: query: -- both partitions will be pruned
 -- basicStatState: NONE colStatState: NONE
-explain extended select * from loc_orc where year=2001 and year='__HIVE_DEFAULT_PARTITION__'
+explain extended select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- both partitions will be pruned
 -- basicStatState: NONE colStatState: NONE
-explain extended select * from loc_orc where year=2001 and year='__HIVE_DEFAULT_PARTITION__'
+explain extended select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__'
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
@@ -884,7 +884,7 @@ POSTHOOK: Lineage: loc_orc PARTITION(yea
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (and (= (TOK_TABLE_OR_COL year) 2001) (= (TOK_TABLE_OR_COL year) '__HIVE_DEFAULT_PARTITION__')))))
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (and (= (TOK_TABLE_OR_COL year) '2001') (= (TOK_TABLE_OR_COL year) '__HIVE_DEFAULT_PARTITION__')))))
 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -902,7 +902,7 @@ STAGE PLANS:
           Filter Operator
             isSamplingPred: false
             predicate:
-                expr: ((year = 2001) and (year = '__HIVE_DEFAULT_PARTITION__'))
+                expr: ((year = '2001') and (year = '__HIVE_DEFAULT_PARTITION__'))
                 type: boolean
             Statistics:
                 numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE
@@ -922,13 +922,13 @@ STAGE PLANS:
               ListSink
 
 PREHOOK: query: -- partition level partial column statistics
-analyze table loc_orc partition(year=2001) compute statistics for columns state,locid
+analyze table loc_orc partition(year='2001') compute statistics for columns state,locid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc
 PREHOOK: Input: default@loc_orc@year=2001
 #### A masked pattern was here ####
 POSTHOOK: query: -- partition level partial column statistics
-analyze table loc_orc partition(year=2001) compute statistics for columns state,locid
+analyze table loc_orc partition(year='2001') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc
 POSTHOOK: Input: default@loc_orc@year=2001
@@ -1397,10 +1397,10 @@ STAGE PLANS:
       limit: -1
 
 PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select state,locid from loc_orc where year=2001
+explain extended select state,locid from loc_orc where year='2001'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select state,locid from loc_orc where year=2001
+explain extended select state,locid from loc_orc where year='2001'
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
@@ -1409,7 +1409,7 @@ POSTHOOK: Lineage: loc_orc PARTITION(yea
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL state)) (TOK_SELEXPR (TOK_TABLE_OR_COL locid))) (TOK_WHERE (= (TOK_TABLE_OR_COL year) 2001))))
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL state)) (TOK_SELEXPR (TOK_TABLE_OR_COL locid))) (TOK_WHERE (= (TOK_TABLE_OR_COL year) '2001'))))
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1508,10 +1508,10 @@ STAGE PLANS:
       limit: -1
 
 PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE
-explain extended select state,locid from loc_orc where year!=2001
+explain extended select state,locid from loc_orc where year!='2001'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE
-explain extended select state,locid from loc_orc where year!=2001
+explain extended select state,locid from loc_orc where year!='2001'
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
@@ -1520,7 +1520,7 @@ POSTHOOK: Lineage: loc_orc PARTITION(yea
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL state)) (TOK_SELEXPR (TOK_TABLE_OR_COL locid))) (TOK_WHERE (!= (TOK_TABLE_OR_COL year) 2001))))
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL state)) (TOK_SELEXPR (TOK_TABLE_OR_COL locid))) (TOK_WHERE (!= (TOK_TABLE_OR_COL year) '2001'))))
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1536,44 +1536,37 @@ STAGE PLANS:
             Statistics:
                 numRows: 1 dataSize: 325 basicStatsState: COMPLETE colStatsState: NONE
             GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate:
-                  expr: (year <> 2001)
-                  type: boolean
+            Select Operator
+              expressions:
+                    expr: state
+                    type: string
+                    expr: locid
+                    type: int
+              outputColumnNames: _col0, _col1
               Statistics:
                   numRows: 1 dataSize: 325 basicStatsState: COMPLETE colStatsState: NONE
-              Select Operator
-                expressions:
-                      expr: state
-                      type: string
-                      expr: locid
-                      type: int
-                outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
                 Statistics:
                     numRows: 1 dataSize: 325 basicStatsState: COMPLETE colStatsState: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics:
-                      numRows: 1 dataSize: 325 basicStatsState: COMPLETE colStatsState: NONE
 #### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1
-                        columns.types string:int
-                        escape.delim \
-                        hive.serialization.extend.nesting.levels true
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      columns _col0,_col1
+                      columns.types string:int
+                      escape.delim \
+                      hive.serialization.extend.nesting.levels true
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                TotalFiles: 1
+                GatherStats: false
+                MultiFileSpray: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -1749,11 +1742,11 @@ STAGE PLANS:
 
 PREHOOK: query: -- This is to test filter expression evaluation on partition column
 -- numRows: 2 dataSize: 8 basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select locid from loc_orc where locid>0 and year=2001
+explain extended select locid from loc_orc where locid>0 and year='2001'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- This is to test filter expression evaluation on partition column
 -- numRows: 2 dataSize: 8 basicStatState: COMPLETE colStatState: COMPLETE
-explain extended select locid from loc_orc where locid>0 and year=2001
+explain extended select locid from loc_orc where locid>0 and year='2001'
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
@@ -1762,7 +1755,7 @@ POSTHOOK: Lineage: loc_orc PARTITION(yea
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL locid))) (TOK_WHERE (and (> (TOK_TABLE_OR_COL locid) 0) (= (TOK_TABLE_OR_COL year) 2001)))))
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL locid))) (TOK_WHERE (and (> (TOK_TABLE_OR_COL locid) 0) (= (TOK_TABLE_OR_COL year) '2001')))))
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1865,9 +1858,9 @@ STAGE PLANS:
     Fetch Operator
       limit: -1
 
-PREHOOK: query: explain extended select locid,year from loc_orc where locid>0 and year=2001
+PREHOOK: query: explain extended select locid,year from loc_orc where locid>0 and year='2001'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select locid,year from loc_orc where locid>0 and year=2001
+POSTHOOK: query: explain extended select locid,year from loc_orc where locid>0 and year='2001'
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
@@ -1876,7 +1869,7 @@ POSTHOOK: Lineage: loc_orc PARTITION(yea
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL locid)) (TOK_SELEXPR (TOK_TABLE_OR_COL year))) (TOK_WHERE (and (> (TOK_TABLE_OR_COL locid) 0) (= (TOK_TABLE_OR_COL year) 2001)))))
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL locid)) (TOK_SELEXPR (TOK_TABLE_OR_COL year))) (TOK_WHERE (and (> (TOK_TABLE_OR_COL locid) 0) (= (TOK_TABLE_OR_COL year) '2001')))))
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1981,9 +1974,9 @@ STAGE PLANS:
     Fetch Operator
       limit: -1
 
-PREHOOK: query: explain extended select * from (select locid,year from loc_orc) test where locid>0 and year=2001
+PREHOOK: query: explain extended select * from (select locid,year from loc_orc) test where locid>0 and year='2001'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select * from (select locid,year from loc_orc) test where locid>0 and year=2001
+POSTHOOK: query: explain extended select * from (select locid,year from loc_orc) test where locid>0 and year='2001'
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=2001).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
@@ -1992,7 +1985,7 @@ POSTHOOK: Lineage: loc_orc PARTITION(yea
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
 POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL locid)) (TOK_SELEXPR (TOK_TABLE_OR_COL year))))) test)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (and (> (TOK_TABLE_OR_COL locid) 0) (= (TOK_TABLE_OR_COL year) 2001)))))
+  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME loc_orc))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL locid)) (TOK_SELEXPR (TOK_TABLE_OR_COL year))))) test)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (and (> (TOK_TABLE_OR_COL locid) 0) (= (TOK_TABLE_OR_COL year) '2001')))))
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage

Modified: hive/trunk/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out?rev=1552449&r1=1552448&r2=1552449&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out Thu Dec 19 22:49:01 2013
@@ -1,43 +1,43 @@
-PREHOOK: query: create table dynamic_part_table(intcol int) partitioned by (partcol1 int, partcol2 int)
+PREHOOK: query: create table dynamic_part_table(intcol string) partitioned by (partcol1 string, partcol2 string)
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table dynamic_part_table(intcol int) partitioned by (partcol1 int, partcol2 int)
+POSTHOOK: query: create table dynamic_part_table(intcol string) partitioned by (partcol1 string, partcol2 string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: default@dynamic_part_table
-PREHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select 1, 1, 1 from src where key=150
+PREHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select '1', '1', '1' from src where key=150
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@dynamic_part_table
-POSTHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select 1, 1, 1 from src where key=150
+POSTHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select '1', '1', '1' from src where key=150
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dynamic_part_table@partcol1=1/partcol2=1
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=1,partcol2=1).intcol SIMPLE []
-PREHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select 1, NULL, 1 from src where key=150
+PREHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select '1', NULL, '1' from src where key=150
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@dynamic_part_table
-POSTHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select 1, NULL, 1 from src where key=150
+POSTHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select '1', NULL, '1' from src where key=150
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dynamic_part_table@partcol1=__HIVE_DEFAULT_PARTITION__/partcol2=1
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=1,partcol2=1).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=__HIVE_DEFAULT_PARTITION__,partcol2=1).intcol SIMPLE []
-PREHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select 1, 1, NULL from src where key=150
+PREHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select '1', '1', NULL from src where key=150
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@dynamic_part_table
-POSTHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select 1, 1, NULL from src where key=150
+POSTHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select '1', '1', NULL from src where key=150
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dynamic_part_table@partcol1=1/partcol2=__HIVE_DEFAULT_PARTITION__
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=1,partcol2=1).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=1,partcol2=__HIVE_DEFAULT_PARTITION__).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=__HIVE_DEFAULT_PARTITION__,partcol2=1).intcol SIMPLE []
-PREHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select 1, NULL, NULL from src where key=150
+PREHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select '1', NULL, NULL from src where key=150
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@dynamic_part_table
-POSTHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select 1, NULL, NULL from src where key=150
+POSTHOOK: query: insert into table dynamic_part_table partition(partcol1, partcol2) select '1', NULL, NULL from src where key=150
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dynamic_part_table@partcol1=__HIVE_DEFAULT_PARTITION__/partcol2=__HIVE_DEFAULT_PARTITION__
@@ -45,16 +45,16 @@ POSTHOOK: Lineage: dynamic_part_table PA
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=1,partcol2=__HIVE_DEFAULT_PARTITION__).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=__HIVE_DEFAULT_PARTITION__,partcol2=1).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=__HIVE_DEFAULT_PARTITION__,partcol2=__HIVE_DEFAULT_PARTITION__).intcol SIMPLE []
-PREHOOK: query: explain extended select intcol from dynamic_part_table where partcol1=1 and partcol2=1
+PREHOOK: query: explain extended select intcol from dynamic_part_table where partcol1='1' and partcol2='1'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select intcol from dynamic_part_table where partcol1=1 and partcol2=1
+POSTHOOK: query: explain extended select intcol from dynamic_part_table where partcol1='1' and partcol2='1'
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=1,partcol2=1).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=1,partcol2=__HIVE_DEFAULT_PARTITION__).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=__HIVE_DEFAULT_PARTITION__,partcol2=1).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=__HIVE_DEFAULT_PARTITION__,partcol2=__HIVE_DEFAULT_PARTITION__).intcol SIMPLE []
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME dynamic_part_table))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL intcol))) (TOK_WHERE (and (= (TOK_TABLE_OR_COL partcol1) 1) (= (TOK_TABLE_OR_COL partcol2) 1)))))
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME dynamic_part_table))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL intcol))) (TOK_WHERE (and (= (TOK_TABLE_OR_COL partcol1) '1') (= (TOK_TABLE_OR_COL partcol2) '1')))))
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -73,7 +73,7 @@ STAGE PLANS:
             Select Operator
               expressions:
                     expr: intcol
-                    type: int
+                    type: string
               outputColumnNames: _col0
               Statistics:
                   numRows: 1 dataSize: 1 basicStatsState: COMPLETE colStatsState: NONE
@@ -90,7 +90,7 @@ STAGE PLANS:
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
                       columns _col0
-                      columns.types int
+                      columns.types string
                       escape.delim \
                       hive.serialization.extend.nesting.levels true
                       serialization.format 1
@@ -114,14 +114,14 @@ STAGE PLANS:
               COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns intcol
-              columns.types int
+              columns.types string
 #### A masked pattern was here ####
               name default.dynamic_part_table
               numFiles 1
               numRows 1
               partition_columns partcol1/partcol2
               rawDataSize 1
-              serialization.ddl struct dynamic_part_table { i32 intcol}
+              serialization.ddl struct dynamic_part_table { string intcol}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 2
@@ -133,11 +133,11 @@ STAGE PLANS:
               properties:
                 bucket_count -1
                 columns intcol
-                columns.types int
+                columns.types string
 #### A masked pattern was here ####
                 name default.dynamic_part_table
                 partition_columns partcol1/partcol2
-                serialization.ddl struct dynamic_part_table { i32 intcol}
+                serialization.ddl struct dynamic_part_table { string intcol}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
@@ -151,16 +151,16 @@ STAGE PLANS:
     Fetch Operator
       limit: -1
 
-PREHOOK: query: explain extended select intcol from dynamic_part_table where partcol1=1 and partcol2=1
+PREHOOK: query: explain extended select intcol from dynamic_part_table where partcol1='1' and partcol2='1'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select intcol from dynamic_part_table where partcol1=1 and partcol2=1
+POSTHOOK: query: explain extended select intcol from dynamic_part_table where partcol1='1' and partcol2='1'
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=1,partcol2=1).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=1,partcol2=__HIVE_DEFAULT_PARTITION__).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=__HIVE_DEFAULT_PARTITION__,partcol2=1).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=__HIVE_DEFAULT_PARTITION__,partcol2=__HIVE_DEFAULT_PARTITION__).intcol SIMPLE []
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME dynamic_part_table))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL intcol))) (TOK_WHERE (and (= (TOK_TABLE_OR_COL partcol1) 1) (= (TOK_TABLE_OR_COL partcol2) 1)))))
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME dynamic_part_table))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL intcol))) (TOK_WHERE (and (= (TOK_TABLE_OR_COL partcol1) '1') (= (TOK_TABLE_OR_COL partcol2) '1')))))
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -179,7 +179,7 @@ STAGE PLANS:
             Select Operator
               expressions:
                     expr: intcol
-                    type: int
+                    type: string
               outputColumnNames: _col0
               Statistics:
                   numRows: 1 dataSize: 1 basicStatsState: COMPLETE colStatsState: NONE
@@ -196,7 +196,7 @@ STAGE PLANS:
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
                       columns _col0
-                      columns.types int
+                      columns.types string
                       escape.delim \
                       hive.serialization.extend.nesting.levels true
                       serialization.format 1
@@ -220,14 +220,14 @@ STAGE PLANS:
               COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns intcol
-              columns.types int
+              columns.types string
 #### A masked pattern was here ####
               name default.dynamic_part_table
               numFiles 1
               numRows 1
               partition_columns partcol1/partcol2
               rawDataSize 1
-              serialization.ddl struct dynamic_part_table { i32 intcol}
+              serialization.ddl struct dynamic_part_table { string intcol}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 2
@@ -239,11 +239,11 @@ STAGE PLANS:
               properties:
                 bucket_count -1
                 columns intcol
-                columns.types int
+                columns.types string
 #### A masked pattern was here ####
                 name default.dynamic_part_table
                 partition_columns partcol1/partcol2
-                serialization.ddl struct dynamic_part_table { i32 intcol}
+                serialization.ddl struct dynamic_part_table { string intcol}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
@@ -257,16 +257,16 @@ STAGE PLANS:
     Fetch Operator
       limit: -1
 
-PREHOOK: query: explain extended select intcol from dynamic_part_table where (partcol1=1 and partcol2=1)or (partcol1=1 and partcol2='__HIVE_DEFAULT_PARTITION__')
+PREHOOK: query: explain extended select intcol from dynamic_part_table where (partcol1='1' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__')
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select intcol from dynamic_part_table where (partcol1=1 and partcol2=1)or (partcol1=1 and partcol2='__HIVE_DEFAULT_PARTITION__')
+POSTHOOK: query: explain extended select intcol from dynamic_part_table where (partcol1='1' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__')
 POSTHOOK: type: QUERY
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=1,partcol2=1).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=1,partcol2=__HIVE_DEFAULT_PARTITION__).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=__HIVE_DEFAULT_PARTITION__,partcol2=1).intcol SIMPLE []
 POSTHOOK: Lineage: dynamic_part_table PARTITION(partcol1=__HIVE_DEFAULT_PARTITION__,partcol2=__HIVE_DEFAULT_PARTITION__).intcol SIMPLE []
 ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME dynamic_part_table))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL intcol))) (TOK_WHERE (or (and (= (TOK_TABLE_OR_COL partcol1) 1) (= (TOK_TABLE_OR_COL partcol2) 1)) (and (= (TOK_TABLE_OR_COL partcol1) 1) (= (TOK_TABLE_OR_COL partcol2) '__HIVE_DEFAULT_PARTITION__'))))))
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME dynamic_part_table))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL intcol))) (TOK_WHERE (or (and (= (TOK_TABLE_OR_COL partcol1) '1') (= (TOK_TABLE_OR_COL partcol2) '1')) (and (= (TOK_TABLE_OR_COL partcol1) '1') (= (TOK_TABLE_OR_COL partcol2) '__HIVE_DEFAULT_PARTITION__'))))))
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -285,7 +285,7 @@ STAGE PLANS:
             Select Operator
               expressions:
                     expr: intcol
-                    type: int
+                    type: string
               outputColumnNames: _col0
               Statistics:
                   numRows: 2 dataSize: 2 basicStatsState: COMPLETE colStatsState: NONE
@@ -302,7 +302,7 @@ STAGE PLANS:
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
                       columns _col0
-                      columns.types int
+                      columns.types string
                       escape.delim \
                       hive.serialization.extend.nesting.levels true
                       serialization.format 1
@@ -326,14 +326,14 @@ STAGE PLANS:
               COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns intcol
-              columns.types int
+              columns.types string
 #### A masked pattern was here ####
               name default.dynamic_part_table
               numFiles 1
               numRows 1
               partition_columns partcol1/partcol2
               rawDataSize 1
-              serialization.ddl struct dynamic_part_table { i32 intcol}
+              serialization.ddl struct dynamic_part_table { string intcol}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 2
@@ -345,11 +345,11 @@ STAGE PLANS:
               properties:
                 bucket_count -1
                 columns intcol
-                columns.types int
+                columns.types string
 #### A masked pattern was here ####
                 name default.dynamic_part_table
                 partition_columns partcol1/partcol2
-                serialization.ddl struct dynamic_part_table { i32 intcol}
+                serialization.ddl struct dynamic_part_table { string intcol}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
@@ -368,14 +368,14 @@ STAGE PLANS:
               COLUMN_STATS_ACCURATE true
               bucket_count -1
               columns intcol
-              columns.types int
+              columns.types string
 #### A masked pattern was here ####
               name default.dynamic_part_table
               numFiles 1
               numRows 1
               partition_columns partcol1/partcol2
               rawDataSize 1
-              serialization.ddl struct dynamic_part_table { i32 intcol}
+              serialization.ddl struct dynamic_part_table { string intcol}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 2
@@ -387,11 +387,11 @@ STAGE PLANS:
               properties:
                 bucket_count -1
                 columns intcol
-                columns.types int
+                columns.types string
 #### A masked pattern was here ####
                 name default.dynamic_part_table
                 partition_columns partcol1/partcol2
-                serialization.ddl struct dynamic_part_table { i32 intcol}
+                serialization.ddl struct dynamic_part_table { string intcol}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####

