http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q b/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
index 99bd780..9c351ea 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
@@ -23,69 +23,69 @@ set hive.map.aggr.hash.percentmemory=0.0f;
 -- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet)
 -- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct)
 
-create table if not exists loc_staging (
+create table if not exists loc_staging_n1 (
   state string,
   locid int,
   zip bigint,
   year int
 ) row format delimited fields terminated by '|' stored as textfile;
 
-create table loc_orc like loc_staging;
-alter table loc_orc set fileformat orc;
+create table loc_orc_n1 like loc_staging_n1;
+alter table loc_orc_n1 set fileformat orc;
 
-load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging;
+load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n1;
 
-insert overwrite table loc_orc select * from loc_staging;
+insert overwrite table loc_orc_n1 select * from loc_staging_n1;
 
 -- numRows: 8 rawDataSize: 796
-explain select * from loc_orc;
+explain select * from loc_orc_n1;
 
 -- partial column stats
-analyze table loc_orc compute statistics for columns state;
+analyze table loc_orc_n1 compute statistics for columns state;
 
 -- inner group by: map - numRows: 8 reduce - numRows: 4
 -- outer group by: map - numRows: 4 reduce numRows: 2
 explain select a, c, min(b)
 from ( select state as a, locid as b, count(*) as c
-       from loc_orc
+       from loc_orc_n1
        group by state,locid
      ) sq1
 group by a,c;
 
-analyze table loc_orc compute statistics for columns state,locid,year;
+analyze table loc_orc_n1 compute statistics for columns state,locid,year;
 
 -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
 -- Case 9: column stats, NO grouping sets - cardinality = 2
-explain select year from loc_orc group by year;
+explain select year from loc_orc_n1 group by year;
 
 -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
 -- Case 9: column stats, NO grouping sets - cardinality = 8
-explain select state,locid from loc_orc group by state,locid;
+explain select state,locid from loc_orc_n1 group by state,locid;
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
 -- Case 8: column stats, grouping sets - cardinality = 32
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n1 group by state,locid with cube;
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
 -- Case 8: column stats, grouping sets - cardinality = 24
-explain select state,locid from loc_orc group by state,locid with rollup;
-explain select state,locid from loc_orc group by rollup (state,locid);
+explain select state,locid from loc_orc_n1 group by state,locid with rollup;
+explain select state,locid from loc_orc_n1 group by rollup (state,locid);
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8
 -- Case 8: column stats, grouping sets - cardinality = 8
-explain select state,locid from loc_orc group by state,locid grouping sets((state));
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state));
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16
 -- Case 8: column stats, grouping sets - cardinality = 16
-explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid));
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
 -- Case 8: column stats, grouping sets - cardinality = 24
-explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid),());
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
 -- Case 8: column stats, grouping sets - cardinality = 32
-explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state,locid),(state),(locid),());
 
 set hive.map.aggr.hash.percentmemory=0.5f;
 set mapred.max.split.size=80;
@@ -93,52 +93,52 @@ set mapred.max.split.size=80;
 
 -- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
 -- Case 9: column stats, NO grouping sets - cardinality = 2
-explain select year from loc_orc group by year;
+explain select year from loc_orc_n1 group by year;
 
 -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16
 -- Case 8: column stats, grouping sets - cardinality = 16
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n1 group by state,locid with cube;
 
 -- ndvProduct becomes 0 as zip does not have column stats
 -- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
 -- Case 9: column stats, NO grouping sets - cardinality = 2
-explain select state,zip from loc_orc group by state,zip;
+explain select state,zip from loc_orc_n1 group by state,zip;
 
 set mapred.max.split.size=1000;
 set hive.stats.fetch.column.stats=false;
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
 -- Case 7: NO column stats - cardinality = 16
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n1 group by state,locid with cube;
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24
 -- Case 7: NO column stats - cardinality = 12
-explain select state,locid from loc_orc group by state,locid with rollup;
-explain select state,locid from loc_orc group by rollup (state,locid);
+explain select state,locid from loc_orc_n1 group by state,locid with rollup;
+explain select state,locid from loc_orc_n1 group by rollup (state,locid);
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
 -- Case 7: NO column stats - cardinality = 4
-explain select state,locid from loc_orc group by state,locid grouping sets((state));
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state));
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 16
 -- Case 7: NO column stats - cardinality = 8
-explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid));
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24
 -- Case 7: NO column stats - cardinality = 12
-explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid),());
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
 -- Case 7: NO column stats - cardinality = 16
-explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state,locid),(state),(locid),());
 
 set mapred.max.split.size=80;
 
 -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
 -- Case 7: NO column stats - cardinality = 4
-explain select year from loc_orc group by year;
+explain select year from loc_orc_n1 group by year;
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
 -- Case 7: NO column stats - cardinality = 16
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n1 group by state,locid with cube;
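
A quick cross-check of the estimates encoded in the comments above, using the
test's own numbers (numRows = 8 for loc_orc_n1): a cube over (state, locid)
expands to 4 grouping sets {(state,locid), (state), (locid), ()}, so with no
hash aggregation the map side emits 8 * 4 = 32 rows; rollup expands to 3 sets
(8 * 3 = 24) and grouping sets((state),(locid)) to 2 sets (8 * 2 = 16). The
"NO column stats" reduce-side expectations are consistently half the map-side
figure (32 -> 16, 24 -> 12). A minimal sketch of that arithmetic, reusing the
test's table and assuming it has been analyzed as above:

    -- rollup (state, locid) = 3 grouping sets: expect ~8 rows * 3 = 24 at map side
    explain select state, locid from loc_orc_n1 group by rollup (state, locid);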
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
index 9137669..e4163fa 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
@@ -10,20 +10,20 @@ set hive.exec.reducers.max = 1;
 set hive.transpose.aggr.join=true;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+CREATE TABLE tbl1_n13(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+CREATE TABLE tbl2_n12(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
 
-insert overwrite table tbl1
+insert overwrite table tbl1_n13
 select * from src where key < 10;
 
-insert overwrite table tbl2
+insert overwrite table tbl2_n12
 select * from src where key < 10;
 
-analyze table tbl1 compute statistics;
-analyze table tbl1 compute statistics for columns; 
+analyze table tbl1_n13 compute statistics;
+analyze table tbl1_n13 compute statistics for columns; 
 
-analyze table tbl2 compute statistics;
-analyze table tbl2 compute statistics for columns;
+analyze table tbl2_n12 compute statistics;
+analyze table tbl2_n12 compute statistics for columns;
 
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
@@ -34,11 +34,11 @@ set hive.auto.convert.sortmerge.join=true;
 -- The join is being performed as part of a sub-query. It should be converted to a sort-merge join
 explain
 select count(*) from (
-  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
 ) subq1;
 
 select count(*) from (
-  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
 ) subq1;
 
 -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join
@@ -47,7 +47,7 @@ select count(*) from
 (
   select key, count(*) from 
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
   ) subq1
   group by key
 ) subq2;
@@ -56,7 +56,7 @@ select count(*) from
 (
   select key, count(*) from 
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
   ) subq1
   group by key
 ) subq2;
@@ -68,14 +68,14 @@ select src1.key, src1.cnt1, src2.cnt1 from
 (
   select key, count(*) as cnt1 from 
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
   ) subq1 group by key
 ) src1
 join
 (
   select key, count(*) as cnt1 from 
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
   ) subq2 group by key
 ) src2
 on src1.key = src2.key;
@@ -84,14 +84,14 @@ select src1.key, src1.cnt1, src2.cnt1 from
 (
   select key, count(*) as cnt1 from 
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
   ) subq1 group by key
 ) src1
 join
 (
   select key, count(*) as cnt1 from 
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
   ) subq2 group by key
 ) src2
 on src1.key = src2.key;
@@ -100,15 +100,15 @@ on src1.key = src2.key;
 -- be converted to a sort-merge join.
 explain
 select count(*) from 
-  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+  (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 
     join
-  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
   on subq1.key = subq2.key;
 
 select count(*) from 
-  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+  (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 
     join
-  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
   on subq1.key = subq2.key;
 
 -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should
@@ -118,22 +118,22 @@ select count(*) from
   (
   select * from
     (
-      select a.key as key, a.value as value from tbl1 a where key < 8
+      select a.key as key, a.value as value from tbl1_n13 a where key < 8
     ) subq1 
   where key < 6
   ) subq2
-  join tbl2 b
+  join tbl2_n12 b
   on subq2.key = b.key;
 
 select count(*) from 
   (
   select * from
     (
-      select a.key as key, a.value as value from tbl1 a where key < 8
+      select a.key as key, a.value as value from tbl1_n13 a where key < 8
     ) subq1 
   where key < 6
   ) subq2
-  join tbl2 b
+  join tbl2_n12 b
   on subq2.key = b.key;
 
 -- Both the tables are nested sub-queries i.e. more than 1 level of sub-query.
@@ -143,7 +143,7 @@ select count(*) from
   (
   select * from
     (
-      select a.key as key, a.value as value from tbl1 a where key < 8
+      select a.key as key, a.value as value from tbl1_n13 a where key < 8
     ) subq1 
   where key < 6
   ) subq2
@@ -151,7 +151,7 @@ select count(*) from
   (
   select * from
     (
-      select a.key as key, a.value as value from tbl1 a where key < 8
+      select a.key as key, a.value as value from tbl1_n13 a where key < 8
     ) subq3 
   where key < 6
   ) subq4
@@ -161,7 +161,7 @@ select count(*) from
   (
   select * from
     (
-      select a.key as key, a.value as value from tbl1 a where key < 8
+      select a.key as key, a.value as value from tbl1_n13 a where key < 8
     ) subq1 
   where key < 6
   ) subq2
@@ -169,7 +169,7 @@ select count(*) from
   (
   select * from
     (
-      select a.key as key, a.value as value from tbl1 a where key < 8
+      select a.key as key, a.value as value from tbl1_n13 a where key < 8
     ) subq3 
   where key < 6
   ) subq4
@@ -180,62 +180,62 @@ select count(*) from
 -- item, but that is not part of the join key.
 explain
 select count(*) from 
-  (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
+  (select a.key as key, concat(a.value, a.value) as value from tbl1_n13 a where key < 8) subq1 
     join
-  (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+  (select a.key as key, concat(a.value, a.value) as value from tbl2_n12 a where key < 8) subq2
   on subq1.key = subq2.key;
 
 select count(*) from 
-  (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
+  (select a.key as key, concat(a.value, a.value) as value from tbl1_n13 a where key < 8) subq1 
     join
-  (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+  (select a.key as key, concat(a.value, a.value) as value from tbl2_n12 a where key < 8) subq2
   on subq1.key = subq2.key;
 
 -- Since the join key is modified by the sub-query, neither sort-merge join nor bucketized map-side
 -- join should be performed
 explain
 select count(*) from 
-  (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n13 a) subq1 
     join
-  (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n12 a) subq2
   on subq1.key = subq2.key;
 
 select count(*) from 
-  (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n13 a) subq1 
     join
-  (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n12 a) subq2
   on subq1.key = subq2.key;
 
 -- One of the tables is a sub-query and the other is not.
 -- It should be converted to a sort-merge join.
 explain
 select count(*) from 
-  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
-    join tbl2 a on subq1.key = a.key;
+  (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 
+    join tbl2_n12 a on subq1.key = a.key;
 
 select count(*) from 
-  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
-    join tbl2 a on subq1.key = a.key;
+  (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 
+    join tbl2_n12 a on subq1.key = a.key;
 
 -- There are more than 2 inputs to the join, all of them being sub-queries.
 -- It should be converted to a sort-merge join
 explain
 select count(*) from 
-  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+  (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 
     join
-  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
   on (subq1.key = subq2.key)
     join
-  (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+  (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq3
   on (subq1.key = subq3.key);
 
 select count(*) from 
-  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+  (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 
     join
-  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
   on subq1.key = subq2.key
     join
-  (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+  (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq3
   on (subq1.key = subq3.key);
 
 -- The join is being performed on a nested sub-query, and an aggregation is performed after that.
@@ -246,11 +246,11 @@ select count(*) from (
   (
     select * from
     (
-      select a.key as key, a.value as value from tbl1 a where key < 8
+      select a.key as key, a.value as value from tbl1_n13 a where key < 8
     ) subq1
     where key < 6
   ) subq2
-join tbl2 b
+join tbl2_n12 b
 on subq2.key = b.key) a;
 
 select count(*) from (
@@ -258,22 +258,22 @@ select count(*) from (
   (
     select * from
     (
-      select a.key as key, a.value as value from tbl1 a where key < 8
+      select a.key as key, a.value as value from tbl1_n13 a where key < 8
     ) subq1
     where key < 6
   ) subq2
-join tbl2 b
+join tbl2_n12 b
 on subq2.key = b.key) a;
 
 -- The join is followed by a multi-table insert. It should be converted to
 -- a sort-merge join
-explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key;
+explain select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key;
 
-select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key;
+select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key;
 
 -- The join is followed by a multi-table insert, and one of the inserts involves a reducer.
 -- It should be converted to a sort-merge join
-explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key;
+explain select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key;
 
-select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key;
+select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key;
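
For context on what these cases exercise: the sort-merge-bucket (SMB) join
conversion applies only when both sides are bucketed and sorted on the join
key into the same number of buckets, with the flags set at the top of the
test. A minimal sketch under those same assumptions:

    set hive.optimize.bucketmapjoin = true;
    set hive.optimize.bucketmapjoin.sortedmerge = true;
    set hive.auto.convert.sortmerge.join = true;
    -- tbl1_n13 and tbl2_n12 are both CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS,
    -- so this equi-join on key is eligible for SMB conversion
    explain select count(*) from tbl1_n13 a join tbl2_n12 b on a.key = b.key;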
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q b/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q
index ae17ec6..78c1936 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q
@@ -4,14 +4,14 @@ set hive.mapred.mode=nonstrict;
 set hive.cbo.returnpath.hiveop=true;
 set hive.auto.convert.join = true;
 
-CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n112(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE;
 
 explain
 FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*;
+INSERT OVERWRITE TABLE dest1_n112 SELECT src1.*, src2.*;
 
 
 FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*;
+INSERT OVERWRITE TABLE dest1_n112 SELECT src1.*, src2.*;
 
-SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1;
\ No newline at end of file
+SELECT sum(hash(dest1_n112.key1,dest1_n112.value1,dest1_n112.key2,dest1_n112.value2)) FROM dest1_n112;
\ No newline at end of file
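
The final SELECT uses a common qfile idiom: sum(hash(...)) over all output
columns folds the whole result set into a single order-independent checksum,
so the golden-file comparison does not depend on row order. A sketch of the
same pattern against a hypothetical table t with columns c1 and c2:

    -- order-insensitive checksum of an entire result set
    SELECT sum(hash(c1, c2)) FROM t;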

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q b/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q
index 6ada9ae..00c19c7 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q
@@ -4,10 +4,10 @@ set hive.cbo.returnpath.hiveop=true;
 set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
 
-create table A as
+create table A_n18 as
 select * from src;
 
-create table B as
+create table B_n14 as
 select * from src order by key
 limit 10;
 
@@ -15,19 +15,19 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000000;
 
-explain select * from A join B;
+explain select * from A_n18 join B_n14;
 
-explain select * from B d1 join B d2 on d1.key = d2.key join A;
+explain select * from B_n14 d1 join B_n14 d2 on d1.key = d2.key join A_n18;
 
-explain select * from A join 
+explain select * from A_n18 join 
          (select d1.key 
-          from B d1 join B d2 on d1.key = d2.key 
+          from B_n14 d1 join B_n14 d2 on d1.key = d2.key 
           where 1 = 1 group by d1.key) od1;
 
-explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1;
+explain select * from A_n18 join (select d1.key from B_n14 d1 join B_n14 d2 where 1 = 1 group by d1.key) od1;
 
 explain select * from 
-(select A.key from A group by key) ss join 
-(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1;
+(select A_n18.key from A_n18 group by key) ss join 
+(select d1.key from B_n14 d1 join B_n14 d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1;
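
These plans exercise the cross-product check: a JOIN written with no join
condition, such as "select * from A_n18 join B_n14", compiles to a cartesian
product, which Hive flags with a cross-product warning in the plan. A minimal
contrast, as a sketch:

    explain select * from A_n18 join B_n14;                            -- no ON clause: cross product
    explain select * from A_n18 join B_n14 on (A_n18.key = B_n14.key); -- equi-join: no warning expected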
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_gby2_map_multi_distinct.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_gby2_map_multi_distinct.q b/ql/src/test/queries/clientpositive/cbo_rp_gby2_map_multi_distinct.q
index 0e73621..cfb0b79 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_gby2_map_multi_distinct.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_gby2_map_multi_distinct.q
@@ -9,32 +9,32 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
+CREATE TABLE dest1_n166(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n166
 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value)
 GROUP BY substr(src.key,1,1);
 
 FROM src
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n166
 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value)
 GROUP BY substr(src.key,1,1);
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n166.* FROM dest1_n166;
 
 -- HIVE-5560: when the group by key is used in a distinct function, invalid results are returned
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n166
 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value)
 GROUP BY substr(src.key,1,1);
 
 FROM src
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n166
 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value)
 GROUP BY substr(src.key,1,1);
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n166.* FROM dest1_n166;
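
The shape being tested here is several DISTINCT aggregates over different
expressions in a single GROUP BY, including the HIVE-5560 regression where the
group-by key itself appears inside a DISTINCT. A stripped-down sketch of that
shape:

    SELECT substr(key,1,1),
           count(DISTINCT substr(key,1,1)),  -- distinct over the group key (HIVE-5560 case)
           sum(DISTINCT substr(value,5)),    -- second distinct aggregate
           count(value)                      -- plain aggregate alongside them
    FROM src
    GROUP BY substr(key,1,1);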

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q b/ql/src/test/queries/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q
index ff2fbfb..ebd08bb 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q
@@ -5,11 +5,11 @@ set hive.mapred.mode=nonstrict;
 set hive.groupby.skewindata=false;
 set mapred.reduce.tasks=31;
 
-CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n123(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n123 SELECT
   sum(substr(src.value,5)),
   avg(substr(src.value,5)),
   avg(DISTINCT substr(src.value,5)),
@@ -23,7 +23,7 @@ INSERT OVERWRITE TABLE dest1 SELECT
   count(DISTINCT substr(src.value, 5));
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n123 SELECT
   sum(substr(src.value,5)),
   avg(substr(src.value,5)),
   avg(DISTINCT substr(src.value,5)),
@@ -36,5 +36,5 @@ INSERT OVERWRITE TABLE dest1 SELECT
   sum(DISTINCT substr(src.value, 5)),
   count(DISTINCT substr(src.value, 5));
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n123.* FROM dest1_n123;
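
This variant runs the same multi-DISTINCT aggregation with
hive.groupby.skewindata=false, i.e. the direct single-stage group-by plan;
when that flag is true, Hive instead compiles a two-job plan whose first stage
spreads rows across reducers before the final aggregation (per the config's
documented intent). Sketch of the toggle:

    set hive.groupby.skewindata=false;  -- one job, aggregate directly by key
    set hive.groupby.skewindata=true;   -- two jobs, partial then final aggregation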
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_insert.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_insert.q b/ql/src/test/queries/clientpositive/cbo_rp_insert.q
index eeaeec2..4c493db 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_insert.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_insert.q
@@ -4,14 +4,14 @@ set hive.cbo.returnpath.hiveop=true;
 drop database if exists x314 cascade;
 create database x314;
 use x314;
-create table source(s1 int, s2 int);
-create table target1(x int, y int, z int);
+create table source_n1(s1 int, s2 int);
+create table target1_n0(x int, y int, z int);
 
-insert into source(s2,s1) values(2,1);
--- expect source to contain 1 row (1,2)
-select * from source;
-insert into target1(z,x) select * from source;
--- expect target1 to contain 1 row (2,NULL,1)
-select * from target1;
+insert into source_n1(s2,s1) values(2,1);
+-- expect source_n1 to contain 1 row (1,2)
+select * from source_n1;
+insert into target1_n0(z,x) select * from source_n1;
+-- expect target1_n0 to contain 1 row (2,NULL,1)
+select * from target1_n0;
 
 drop database if exists x314 cascade;
\ No newline at end of file
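
The expected rows follow from INSERT ... (column list) semantics: select
output positions map to the listed columns in order, and unlisted columns are
filled with NULL. So values(2,1) into source_n1(s2,s1) stores s1=1, s2=2,
giving the row (1,2); selecting that row into target1_n0(z,x) sets z=1 and
x=2, leaving y NULL, hence (2,NULL,1). Spelled out:

    insert into target1_n0 (z, x)  -- select position 1 -> z, position 2 -> x, y -> NULL
    select s1, s2 from source_n1;  -- s1=1 lands in z, s2=2 lands in x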

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_join1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_join1.q b/ql/src/test/queries/clientpositive/cbo_rp_join1.q
index 6f637d9..2be8c2c 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_join1.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_join1.q
@@ -1,21 +1,21 @@
 set hive.mapred.mode=nonstrict;
 set hive.auto.convert.join = true;
 
-CREATE TABLE myinput1(key int, value int);
-LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1;
+CREATE TABLE myinput1_n0(key int, value int);
+LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1_n0;
 
 SET hive.optimize.bucketmapjoin = true;
 SET hive.optimize.bucketmapjoin.sortedmerge = true;
 SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 
-EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND b.key = 40;
-SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND b.key = 40;
+EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND b.key = 40;
+SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND b.key = 40;
 
-EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.value = 40 AND a.key = a.value AND b.key = 40;
-SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.key = a.value AND b.key = 40;
+EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.value = 40 AND a.key = a.value AND b.key = 40;
+SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.key = a.value AND b.key = 40;
 
-EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.key = b.key AND b.key = 40;
-SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.key = b.key AND b.key = 40;
+EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.key = b.key AND b.key = 40;
+SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.key = b.key AND b.key = 40;
 
-EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
+EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
+SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
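
These cases probe FULL OUTER JOIN with ON-clause predicates that reference
only one side (a.key = 40, b.key = 40). Under outer-join semantics such
predicates only decide which row pairs match; they never remove rows, so every
row of both inputs survives, NULL-padded when unmatched. A minimal contrast
sketch:

    -- ON predicate: non-matching rows still appear with NULLs on the other side
    SELECT * FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b ON a.key = 40 AND b.key = 40;
    -- WHERE predicate: the same condition applied after the join does filter rows
    SELECT * FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b ON a.key = b.key WHERE a.key = 40;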

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_lineage2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_lineage2.q b/ql/src/test/queries/clientpositive/cbo_rp_lineage2.q
index 47c8168..fc22431 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_lineage2.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_lineage2.q
@@ -1,54 +1,54 @@
---! qt:dataset:src1
---! qt:dataset:src
-set hive.mapred.mode=nonstrict;
-set hive.cbo.returnpath.hiveop=true;
-set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;
-
-drop table if exists src2;
-create table src2 as select key key2, value value2 from src1;
-
-select * from src1 where key is not null and value is not null limit 3;
-select * from src1 where key > 10 and value > 'val' order by key limit 5;
-
-drop table if exists dest1;
-create table dest1 as select * from src1;
-insert into table dest1 select * from src2;
-
-select key k, dest1.value from dest1;
-select key from src1 union select key2 from src2 order by key;
-select key k from src1 union select key2 from src2 order by k;
-
-select key, count(1) a from dest1 group by key;
-select key k, count(*) from dest1 group by key;
-select key k, count(value) from dest1 group by key;
-select value, max(length(key)) from dest1 group by value;
-select value, max(length(key)) from dest1 group by value order by value limit 5;
-
-select key, length(value) from dest1;
-select length(value) + 3 from dest1;
-select 5 from dest1;
-select 3 * 5 from dest1;
-
-drop table if exists dest2;
-create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2;
-insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2;
-insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2;
-insert into table dest2
-  select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1;
-
-select * from src1 where length(key) > 2;
-select * from src1 where length(key) > 2 and value > 'a';
-
-drop table if exists dest3;
-create table dest3 as
-  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1;
-insert overwrite table dest2
-  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3;
-
-drop table if exists dest_l1;
-CREATE TABLE dest_l1(key INT, value STRING) STORED AS TEXTFILE;
-
-INSERT OVERWRITE TABLE dest_l1
+--! qt:dataset:src1
+--! qt:dataset:src
+set hive.mapred.mode=nonstrict;
+set hive.cbo.returnpath.hiveop=true;
+set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;
+
+drop table if exists src2_n1;
+create table src2_n1 as select key key2, value value2 from src1;
+
+select * from src1 where key is not null and value is not null limit 3;
+select * from src1 where key > 10 and value > 'val' order by key limit 5;
+
+drop table if exists dest1_n95;
+create table dest1_n95 as select * from src1;
+insert into table dest1_n95 select * from src2_n1;
+
+select key k, dest1_n95.value from dest1_n95;
+select key from src1 union select key2 from src2_n1 order by key;
+select key k from src1 union select key2 from src2_n1 order by k;
+
+select key, count(1) a from dest1_n95 group by key;
+select key k, count(*) from dest1_n95 group by key;
+select key k, count(value) from dest1_n95 group by key;
+select value, max(length(key)) from dest1_n95 group by value;
+select value, max(length(key)) from dest1_n95 group by value order by value limit 5;
+
+select key, length(value) from dest1_n95;
+select length(value) + 3 from dest1_n95;
+select 5 from dest1_n95;
+select 3 * 5 from dest1_n95;
+
+drop table if exists dest2_n25;
+create table dest2_n25 as select * from src1 JOIN src2_n1 ON src1.key = src2_n1.key2;
+insert overwrite table dest2_n25 select * from src1 JOIN src2_n1 ON src1.key = src2_n1.key2;
+insert into table dest2_n25 select * from src1 JOIN src2_n1 ON src1.key = src2_n1.key2;
+insert into table dest2_n25
+  select * from src1 JOIN src2_n1 ON length(src1.value) = length(src2_n1.value2) + 1;
+
+select * from src1 where length(key) > 2;
+select * from src1 where length(key) > 2 and value > 'a';
+
+drop table if exists dest3_n3;
+create table dest3_n3 as
+  select * from src1 JOIN src2_n1 ON src1.key = src2_n1.key2 WHERE length(key) > 1;
+insert overwrite table dest2_n25
+  select * from src1 JOIN src2_n1 ON src1.key = src2_n1.key2 WHERE length(key) > 3;
+
+drop table if exists dest_l1_n1;
+CREATE TABLE dest_l1_n1(key INT, value STRING) STORED AS TEXTFILE;
+
+INSERT OVERWRITE TABLE dest_l1_n1
 SELECT j.*
 FROM (SELECT t1.key, p1.value
       FROM src1 t1
@@ -60,61 +60,61 @@ FROM (SELECT t1.key, p1.value
       LEFT OUTER JOIN src p2
       ON (t2.key = p2.key)) j;
 
-drop table if exists emp;
-drop table if exists dept;
-drop table if exists project;
-drop table if exists tgt;
-create table emp(emp_id int, name string, mgr_id int, dept_id int);
-create table dept(dept_id int, dept_name string);
-create table project(project_id int, project_name string);
-create table tgt(dept_name string, name string,
-  emp_id int, mgr_id int, proj_id int, proj_name string);
-
-INSERT INTO TABLE tgt
+drop table if exists emp_n1;
+drop table if exists dept_n0;
+drop table if exists project_n0;
+drop table if exists tgt_n0;
+create table emp_n1(emp_id int, name string, mgr_id int, dept_id int);
+create table dept_n0(dept_id int, dept_name string);
+create table project_n0(project_id int, project_name string);
+create table tgt_n0(dept_name string, name string,
+  emp_id int, mgr_id int, proj_id int, proj_name string);
+
+INSERT INTO TABLE tgt_n0
 SELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id, p.project_id, p.project_name
 FROM (
   SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id
   FROM (
     SELECT e.name, e.dept_id, e.emp_id emp_id, m.emp_id mgr_id
-    FROM emp e JOIN emp m ON e.emp_id = m.emp_id
+    FROM emp_n1 e JOIN emp_n1 m ON e.emp_id = m.emp_id
     ) em
-  JOIN dept d ON d.dept_id = em.dept_id
-  ) emd JOIN project p ON emd.dept_id = p.project_id;
+  JOIN dept_n0 d ON d.dept_id = em.dept_id
+  ) emd JOIN project_n0 p ON emd.dept_id = p.project_id;
 
-drop table if exists dest_l2;
-create table dest_l2 (id int, c1 tinyint, c2 int, c3 bigint) stored as textfile;
-insert into dest_l2 values(0, 1, 100, 10000);
+drop table if exists dest_l2_n0;
+create table dest_l2_n0 (id int, c1 tinyint, c2 int, c3 bigint) stored as textfile;
+insert into dest_l2_n0 values(0, 1, 100, 10000);
 
-select * from (
-  select c1 + c2 x from dest_l2
+select * from (
+  select c1 + c2 x from dest_l2_n0
   union all
-  select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x;
+  select sum(c3) y from (select c3 from dest_l2_n0) v1) v2 order by x;
 
-drop table if exists dest_l3;
-create table dest_l3 (id int, c1 string, c2 string, c3 int) stored as textfile;
-insert into dest_l3 values(0, "s1", "s2", 15);
+drop table if exists dest_l3_n0;
+create table dest_l3_n0 (id int, c1 string, c2 string, c3 int) stored as textfile;
+insert into dest_l3_n0 values(0, "s1", "s2", 15);
 
-select sum(a.c1) over (partition by a.c1 order by a.id)
-from dest_l2 a
+select sum(a.c1) over (partition by a.c1 order by a.id)
+from dest_l2_n0 a
 where a.c2 != 10
 group by a.c1, a.c2, a.id
-having count(a.c2) > 0;
+having count(a.c2) > 0;
 
-select sum(a.c1), count(b.c1), b.c2, b.c3
-from dest_l2 a join dest_l3 b on (a.id = b.id)
+select sum(a.c1), count(b.c1), b.c2, b.c3
+from dest_l2_n0 a join dest_l3_n0 b on (a.id = b.id)
 where a.c2 != 10 and b.c3 > 0
 group by a.c1, a.c2, a.id, b.c1, b.c2, b.c3
-having count(a.c2) > 0
-order by b.c3 limit 5;
+having count(a.c2) > 0
+order by b.c3 limit 5;
 
-drop table if exists t;
-create table t as
-select distinct a.c2, a.c3 from dest_l2 a
-inner join dest_l3 b on (a.id = b.id)
+drop table if exists t_n16;
+create table t_n16 as
+select distinct a.c2, a.c3 from dest_l2_n0 a
+inner join dest_l3_n0 b on (a.id = b.id)
 where a.id > 0 and b.c3 = 15;
 
-SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),
-concat(substr(src1.key,1,1),sum(substr(src1.value,5)))
+SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),
+concat(substr(src1.key,1,1),sum(substr(src1.value,5)))
 from src1
 GROUP BY substr(src1.key,1,1);
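
Everything in this file runs with the LineageLogger post-execution hook
enabled, which is why the statements are mostly bare queries: the hook emits
column-level lineage (each output column and the source columns/expressions it
derives from, roughly as JSON) after every statement, and that output is what
the golden file captures. Sketch of enabling it in a session:

    set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;
    create table src2_n1 as select key key2, value value2 from src1;
    -- the hook should now log edges like src2_n1.key2 <- src1.key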
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_subq_exists.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_subq_exists.q b/ql/src/test/queries/clientpositive/cbo_rp_subq_exists.q
index 692bb41..b3e1af5 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_subq_exists.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_subq_exists.q
@@ -30,7 +30,7 @@ having not exists
 
 -- 19. SubQueries Exists
 -- view test
-create view cv1 as 
+create view cv1_n4 as 
 select * 
 from src_cbo b 
 where exists
@@ -39,7 +39,7 @@ where exists
   where b.value = a.value  and a.key = b.key and a.value > 'val_9')
 ;
 
-select * from cv1
+select * from cv1_n4
 ;
 
 -- sq in from

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q b/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q
index b8535cf..ba198eb 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q
@@ -5,96 +5,96 @@ set hive.cbo.returnpath.hiveop=true;
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- 0.23 changed the input order of data in the reducer task, which affects the result of percentile_approx
 
-CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC)  INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket;
+CREATE TABLE bucket_n1 (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC)  INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_n1;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_n1;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_n1;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_n1;
 
-create table t1 (result double);
-create table t2 (result double);
-create table t3 (result double);
-create table t4 (result double);
-create table t5 (result double);
-create table t6 (result double);
-create table t7 (result array<double>);
-create table t8 (result array<double>);
-create table t9 (result array<double>);
-create table t10 (result array<double>);
-create table t11 (result array<double>);
-create table t12 (result array<double>);
+create table t1_n132 (result double);
+create table t2_n79 (result double);
+create table t3_n31 (result double);
+create table t4_n18 (result double);
+create table t5_n5 (result double);
+create table t6_n4 (result double);
+create table t7_n5 (result array<double>);
+create table t8_n3 (result array<double>);
+create table t9_n2 (result array<double>);
+create table t10_n1 (result array<double>);
+create table t11_n3 (result array<double>);
+create table t12_n1 (result array<double>);
 
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 set hive.map.aggr=false;
 -- disable map-side aggregation
-FROM bucket
-insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5)
-insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100)
-insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000)
+FROM bucket_n1
+insert overwrite table t1_n132 SELECT percentile_approx(cast(key AS double), 0.5)
+insert overwrite table t2_n79 SELECT percentile_approx(cast(key AS double), 0.5, 100)
+insert overwrite table t3_n31 SELECT percentile_approx(cast(key AS double), 0.5, 1000)
 
-insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5)
-insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100)
-insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000)
+insert overwrite table t4_n18 SELECT percentile_approx(cast(key AS int), 0.5)
+insert overwrite table t5_n5 SELECT percentile_approx(cast(key AS int), 0.5, 100)
+insert overwrite table t6_n4 SELECT percentile_approx(cast(key AS int), 0.5, 1000)
 
-insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
-insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100)
-insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000)
+insert overwrite table t7_n5 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
+insert overwrite table t8_n3 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100)
+insert overwrite table t9_n2 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000)
 
-insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98))
-insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100)
-insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000);
+insert overwrite table t10_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98))
+insert overwrite table t11_n3 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100)
+insert overwrite table t12_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000);
 
-select * from t1;
-select * from t2;
-select * from t3;
-select * from t4;
-select * from t5;
-select * from t6;
-select * from t7;
-select * from t8;
-select * from t9;
-select * from t10;
-select * from t11;
-select * from t12;
+select * from t1_n132;
+select * from t2_n79;
+select * from t3_n31;
+select * from t4_n18;
+select * from t5_n5;
+select * from t6_n4;
+select * from t7_n5;
+select * from t8_n3;
+select * from t9_n2;
+select * from t10_n1;
+select * from t11_n3;
+select * from t12_n1;
 
 set hive.map.aggr=true;
 -- enable map-side aggregation
-FROM bucket
-insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5)
-insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100)
-insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000)
+FROM bucket_n1
+insert overwrite table t1_n132 SELECT percentile_approx(cast(key AS double), 0.5)
+insert overwrite table t2_n79 SELECT percentile_approx(cast(key AS double), 0.5, 100)
+insert overwrite table t3_n31 SELECT percentile_approx(cast(key AS double), 0.5, 1000)
 
-insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5)
-insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100)
-insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000)
+insert overwrite table t4_n18 SELECT percentile_approx(cast(key AS int), 0.5)
+insert overwrite table t5_n5 SELECT percentile_approx(cast(key AS int), 0.5, 100)
+insert overwrite table t6_n4 SELECT percentile_approx(cast(key AS int), 0.5, 1000)
 
-insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
-insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100)
-insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000)
+insert overwrite table t7_n5 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
+insert overwrite table t8_n3 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100)
+insert overwrite table t9_n2 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000)
 
-insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98))
-insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100)
-insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000);
+insert overwrite table t10_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98))
+insert overwrite table t11_n3 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100)
+insert overwrite table t12_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000);
 
-select * from t1;
-select * from t2;
-select * from t3;
-select * from t4;
-select * from t5;
-select * from t6;
-select * from t7;
-select * from t8;
-select * from t9;
-select * from t10;
-select * from t11;
-select * from t12;
+select * from t1_n132;
+select * from t2_n79;
+select * from t3_n31;
+select * from t4_n18;
+select * from t5_n5;
+select * from t6_n4;
+select * from t7_n5;
+select * from t8_n3;
+select * from t9_n2;
+select * from t10_n1;
+select * from t11_n3;
+select * from t12_n1;
 
 -- NaN
 explain
-select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket;
-select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket;
+select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket_n1;
+select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket_n1;
 
 -- with CBO
 explain
-select percentile_approx(key, 0.5) from bucket;
-select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket;
+select percentile_approx(key, 0.5) from bucket_n1;
+select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket_n1;
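
For reference, the third argument these inserts vary (100, 1000, and the
default of 10000 when omitted) is percentile_approx's accuracy parameter: the
number of histogram bins the UDAF maintains, trading memory for a closer
approximation. The forms used above, as a sketch:

    select percentile_approx(key, 0.5) from bucket_n1;       -- default accuracy
    select percentile_approx(key, 0.5, 100) from bucket_n1;  -- coarse: 100 bins
    select percentile_approx(key, array(0.05,0.5,0.95,0.98), 1000) from bucket_n1;  -- several percentiles at once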

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q b/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q
index 8f132f2..28afea7 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q
@@ -2,129 +2,129 @@
 set hive.cbo.returnpath.hiveop=true;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE u1 as select key, value from src order by key limit 5;
+CREATE TABLE u1_n0 as select key, value from src order by key limit 5;
 
-CREATE TABLE u2 as select key, value from src order by key limit 3;
+CREATE TABLE u2_n0 as select key, value from src order by key limit 3;
 
-CREATE TABLE u3 as select key, value from src order by key desc limit 5;
+CREATE TABLE u3_n0 as select key, value from src order by key desc limit 5;
 
-select * from u1;
+select * from u1_n0;
 
-select * from u2;
+select * from u2_n0;
 
-select * from u3;
+select * from u3_n0;
 
 select key, value from 
 (
-select key, value from u1
+select key, value from u1_n0
 union all
-select key, value from u2
+select key, value from u2_n0
 union all
-select key as key, value from u3
+select key as key, value from u3_n0
 ) tab;
 
 select key, value from 
 (
-select key, value from u1
+select key, value from u1_n0
 union 
-select key, value from u2
+select key, value from u2_n0
 union all
-select key, value from u3
+select key, value from u3_n0
 ) tab;
 
 select key, value from 
 (
-select key, value from u1
+select key, value from u1_n0
 union distinct
-select key, value from u2
+select key, value from u2_n0
 union all
-select key as key, value from u3
+select key as key, value from u3_n0
 ) tab;
 
 select key, value from 
 (
-select key, value from u1
+select key, value from u1_n0
 union all
-select key, value from u2
+select key, value from u2_n0
 union
-select key, value from u3
+select key, value from u3_n0
 ) tab;
 
 select key, value from 
 (
-select key, value from u1
+select key, value from u1_n0
 union 
-select key, value from u2
+select key, value from u2_n0
 union
-select key as key, value from u3
+select key as key, value from u3_n0
 ) tab;
 
 select distinct * from 
 (
-select key, value from u1
+select key, value from u1_n0
 union all 
-select key, value from u2
+select key, value from u2_n0
 union all
-select key as key, value from u3
+select key as key, value from u3_n0
 ) tab;
 
 select distinct * from 
 (
-select distinct * from u1
+select distinct * from u1_n0
 union  
-select key, value from u2
+select key, value from u2_n0
 union all
-select key as key, value from u3
+select key as key, value from u3_n0
 ) tab;
 
-drop view if exists v;
+drop view if exists v_n14;
 
 set hive.cbo.returnpath.hiveop=false;
-create view v as select distinct * from 
+create view v_n14 as select distinct * from 
 (
-select distinct * from u1
+select distinct * from u1_n0
 union  
-select key, value from u2
+select key, value from u2_n0
 union all
-select key as key, value from u3
+select key as key, value from u3_n0
 ) tab;
 
-describe extended v;
+describe extended v_n14;
 
-select * from v;
+select * from v_n14;
 
-drop view if exists v;
+drop view if exists v_n14;
 
-create view v as select tab.* from 
+create view v_n14 as select tab.* from 
 (
-select distinct * from u1
+select distinct * from u1_n0
 union  
-select distinct * from u2
+select distinct * from u2_n0
 ) tab;
 
-describe extended v;
+describe extended v_n14;
 
-select * from v;
+select * from v_n14;
 
-drop view if exists v;
+drop view if exists v_n14;
 
-create view v as select * from 
+create view v_n14 as select * from 
 (
-select distinct u1.* from u1
+select distinct u1_n0.* from u1_n0
 union all 
-select distinct * from u2
+select distinct * from u2_n0
 ) tab;
 
-describe extended v;
+describe extended v_n14;
 
-select * from v;
+select * from v_n14;
 
 select distinct * from 
 (
-select key, value from u1
+select key, value from u1_n0
 union all 
-select key, value from u2
+select key, value from u2_n0
 union 
-select key as key, value from u3
+select key as key, value from u3_n0
 ) tab;
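
The permutations above pin down Hive's UNION semantics: bare UNION means UNION
DISTINCT, and the operators associate left to right, so where the distinct
lands in the chain determines which duplicates survive. A minimal contrast
sketch:

    select key from u1_n0 union all select key from u2_n0;  -- duplicates kept
    select key from u1_n0 union     select key from u2_n0;  -- deduplicated (UNION DISTINCT)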
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_views.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_views.q b/ql/src/test/queries/clientpositive/cbo_rp_views.q
index d1f2bcb..eb21f2e 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_views.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_views.q
@@ -7,46 +7,46 @@ set hive.stats.fetch.column.stats=true;
 set hive.auto.convert.join=false;
 
 -- 10. Test views
-create view v1 as select c_int, value, c_boolean, dt from cbo_t1;
-create view v2 as select c_int, value from cbo_t2;
+create view v1_n18 as select c_int, value, c_boolean, dt from cbo_t1;
+create view v2_n11 as select c_int, value from cbo_t2;
 
 set hive.cbo.returnpath.hiveop=true;
-select value from v1 where c_boolean=false;
-select max(c_int) from v1 group by (c_boolean);
+select value from v1_n18 where c_boolean=false;
+select max(c_int) from v1_n18 group by (c_boolean);
 
-select count(v1.c_int)  from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int;
-select count(v1.c_int)  from v1 join v2 on v1.c_int = v2.c_int;
+select count(v1_n18.c_int)  from v1_n18 join cbo_t2 on v1_n18.c_int = 
cbo_t2.c_int;
+select count(v1_n18.c_int)  from v1_n18 join v2_n11 on v1_n18.c_int = 
v2_n11.c_int;
 
-select count(*) from v1 a join v1 b on a.value = b.value;
+select count(*) from v1_n18 a join v1_n18 b on a.value = b.value;
 
 set hive.cbo.returnpath.hiveop=false;
-create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = 
cbo_t1.c_boolean;
+create view v3_n4 as select v1_n18.value val from v1_n18 join cbo_t1 on 
v1_n18.c_boolean = cbo_t1.c_boolean;
 
 set hive.cbo.returnpath.hiveop=true;
-select count(val) from v3 where val != '1';
+select count(val) from v3_n4 where val != '1';
 with q1 as ( select key from cbo_t1 where key = '1')
 select count(*) from q1;
 
-with q1 as ( select value from v1 where c_boolean = false)
+with q1 as ( select value from v1_n18 where c_boolean = false)
 select count(value) from q1 ;
 
 set hive.cbo.returnpath.hiveop=false;
-create view v4 as
+create view v4_n4 as
 with q1 as ( select key,c_int from cbo_t1  where key = '1')
 select * from q1
 ;
 
 set hive.cbo.returnpath.hiveop=true;
 with q1 as ( select c_int from q2 where c_boolean = false),
-q2 as ( select c_int,c_boolean from v1  where value = '1')
+q2 as ( select c_int,c_boolean from v1_n18  where value = '1')
 select sum(c_int) from (select c_int from q1) a;
 
 with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = 
cbo_t1.c_int  and cbo_t1.dt='2014'),
-q2 as ( select c_int,c_boolean from v1  where value = '1' or dt = '14')
-select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = 
q2.c_int;
+q2 as ( select c_int,c_boolean from v1_n18  where value = '1' or dt = '14')
+select count(*) from q1 join q2 join v4_n4 on q1.c_int = q2.c_int and 
v4_n4.c_int = q2.c_int;
 
 
-drop view v1;
-drop view v2;
-drop view v3;
-drop view v4;
+drop view v1_n18;
+drop view v2_n11;
+drop view v3_n4;
+drop view v4_n4;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q 
b/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q
index 3978766..9a1afef 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q
@@ -201,7 +201,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name 
rows between 2 precedi
 
 set hive.cbo.returnpath.hiveop=false;
 -- 22. testViewAsTableInputWithWindowing
-create view IF NOT EXISTS mfgr_price_view as 
+create view IF NOT EXISTS mfgr_price_view_n4 as 
 select p_mfgr, p_brand, 
 round(sum(p_retailprice),2) as s 
 from part 
@@ -212,26 +212,26 @@ select *
 from (
 select p_mfgr, p_brand, s, 
 round(sum(s) over w1 , 2)  as s1
-from mfgr_price_view 
+from mfgr_price_view_n4 
 window w1 as (distribute by p_mfgr sort by p_mfgr )
 ) sq
 order by p_mfgr, p_brand;
 
 select p_mfgr, p_brand, s, 
 round(sum(s) over w1 ,2)  as s1
-from mfgr_price_view 
+from mfgr_price_view_n4 
 window w1 as (distribute by p_mfgr sort by p_brand rows between 2 preceding 
and current row);
 
 set hive.cbo.returnpath.hiveop=false;
 -- 23. testCreateViewWithWindowingQuery
-create view IF NOT EXISTS mfgr_brand_price_view as 
+create view IF NOT EXISTS mfgr_brand_price_view_n1 as 
 select p_mfgr, p_brand, 
 round(sum(p_retailprice) over w1,2) as s
 from part 
 window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 
current row);
         
 set hive.cbo.returnpath.hiveop=true ;
-select * from mfgr_brand_price_view;        
+select * from mfgr_brand_price_view_n1;        
         
 -- 24. testLateralViews
 select p_mfgr, p_name, 
@@ -241,7 +241,7 @@ lateral view explode(arr) part_lv as lv_col
 window w1 as (distribute by p_mfgr sort by p_size, lv_col rows between 2 
preceding and current row);        
 
 -- 25. testMultipleInserts3SWQs
-CREATE TABLE part_1( 
+CREATE TABLE part_1_n1( 
 p_mfgr STRING, 
 p_name STRING, 
 p_size INT, 
@@ -249,7 +249,7 @@ r INT,
 dr INT, 
 s DOUBLE);
 
-CREATE TABLE part_2( 
+CREATE TABLE part_2_n1( 
 p_mfgr STRING, 
 p_name STRING, 
 p_size INT, 
@@ -259,7 +259,7 @@ cud INT,
 s2 DOUBLE, 
 fv1 INT);
 
-CREATE TABLE part_3( 
+CREATE TABLE part_3_n1( 
 p_mfgr STRING, 
 p_name STRING, 
 p_size INT, 
@@ -268,12 +268,12 @@ ca INT,
 fv INT);
 
 from part 
-INSERT OVERWRITE TABLE part_1 
+INSERT OVERWRITE TABLE part_1_n1 
 select p_mfgr, p_name, p_size, 
 rank() over(distribute by p_mfgr sort by p_name ) as r, 
 dense_rank() over(distribute by p_mfgr sort by p_name ) as dr, 
 round(sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows 
between unbounded preceding and current row),2) as s
-INSERT OVERWRITE TABLE part_2 
+INSERT OVERWRITE TABLE part_2_n1 
 select  p_mfgr,p_name, p_size,  
 rank() over(distribute by p_mfgr sort by p_name) as r, 
 dense_rank() over(distribute by p_mfgr sort by p_name) as dr, 
@@ -281,18 +281,18 @@ cume_dist() over(distribute by p_mfgr sort by p_name) as 
cud,
 round(sum(p_size) over (distribute by p_mfgr sort by p_size range between 5 
preceding and current row),1) as s2, 
 first_value(p_size) over w1  as fv1
 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 
preceding and 2 following) 
-INSERT OVERWRITE TABLE part_3 
+INSERT OVERWRITE TABLE part_3_n1 
 select  p_mfgr,p_name, p_size,  
 count(*) over(distribute by p_mfgr sort by p_name) as c, 
 count(p_size) over(distribute by p_mfgr sort by p_name) as ca, 
 first_value(p_size) over w1  as fv
 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 
preceding and 2 following);
 
-select * from part_1;
+select * from part_1_n1;
 
-select * from part_2;
+select * from part_2_n1;
 
-select * from part_3;
+select * from part_3_n1;
 
 -- 26. testGroupByHavingWithSWQAndAlias
 select p_mfgr, p_name, p_size, min(p_retailprice) as mi,
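
The windowing suite above leans on Hive's named window specification: a single "window w1 as (...)" clause at the end of the query can be shared by several over clauses. A minimal sketch against the same part table the tests use:

  select p_mfgr, p_name,
         rank() over w1 as r,
         round(sum(p_retailprice) over w1, 2) as s
  from part
  window w1 as (distribute by p_mfgr sort by p_name
                rows between 2 preceding and current row);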

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_subq_exists.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_subq_exists.q 
b/ql/src/test/queries/clientpositive/cbo_subq_exists.q
index bc71840..45f3184 100644
--- a/ql/src/test/queries/clientpositive/cbo_subq_exists.q
+++ b/ql/src/test/queries/clientpositive/cbo_subq_exists.q
@@ -29,7 +29,7 @@ having not exists
 
 -- 19. SubQueries Exists
 -- view test
-create view cv1 as 
+create view cv1_n2 as 
 select * 
 from src_cbo b 
 where exists
@@ -38,7 +38,7 @@ where exists
   where b.value = a.value  and a.key = b.key and a.value > 'val_9')
 ;
 
-select * from cv1
+select * from cv1_n2
 ;
 
 -- sq in from
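
The cv1_n2 view above wraps a correlated EXISTS subquery, which Hive's planner decorrelates into a semi join on the correlated columns. A sketch of an equivalent rewrite, reusing the src_cbo table from the test:

  select b.*
  from src_cbo b
  left semi join (select key, value from src_cbo where value > 'val_9') a
    on b.key = a.key and b.value = a.value;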

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/cbo_views.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_views.q 
b/ql/src/test/queries/clientpositive/cbo_views.q
index 650bad6..fc7e5d2 100644
--- a/ql/src/test/queries/clientpositive/cbo_views.q
+++ b/ql/src/test/queries/clientpositive/cbo_views.q
@@ -8,41 +8,41 @@ set hive.stats.fetch.column.stats=true;
 set hive.auto.convert.join=false;
 
 -- 10. Test views
-create view v1 as select c_int, value, c_boolean, dt from cbo_t1;
-create view v2 as select c_int, value from cbo_t2;
+create view v1_n12 as select c_int, value, c_boolean, dt from cbo_t1;
+create view v2_n6 as select c_int, value from cbo_t2;
 
-select value from v1 where c_boolean=false;
-select max(c_int) from v1 group by (c_boolean);
+select value from v1_n12 where c_boolean=false;
+select max(c_int) from v1_n12 group by (c_boolean);
 
-select count(v1.c_int)  from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int;
-select count(v1.c_int)  from v1 join v2 on v1.c_int = v2.c_int;
+select count(v1_n12.c_int)  from v1_n12 join cbo_t2 on v1_n12.c_int = 
cbo_t2.c_int;
+select count(v1_n12.c_int)  from v1_n12 join v2_n6 on v1_n12.c_int = 
v2_n6.c_int;
 
-select count(*) from v1 a join v1 b on a.value = b.value;
+select count(*) from v1_n12 a join v1_n12 b on a.value = b.value;
 
-create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = 
cbo_t1.c_boolean;
+create view v3_n2 as select v1_n12.value val from v1_n12 join cbo_t1 on 
v1_n12.c_boolean = cbo_t1.c_boolean;
 
-select count(val) from v3 where val != '1';
+select count(val) from v3_n2 where val != '1';
 with q1 as ( select key from cbo_t1 where key = '1')
 select count(*) from q1;
 
-with q1 as ( select value from v1 where c_boolean = false)
+with q1 as ( select value from v1_n12 where c_boolean = false)
 select count(value) from q1 ;
 
-create view v4 as
+create view v4_n2 as
 with q1 as ( select key,c_int from cbo_t1  where key = '1')
 select * from q1
 ;
 
 with q1 as ( select c_int from q2 where c_boolean = false),
-q2 as ( select c_int,c_boolean from v1  where value = '1')
+q2 as ( select c_int,c_boolean from v1_n12  where value = '1')
 select sum(c_int) from (select c_int from q1) a;
 
 with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = 
cbo_t1.c_int  and cbo_t1.dt='2014'),
-q2 as ( select c_int,c_boolean from v1  where value = '1' or dt = '14')
-select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = 
q2.c_int;
+q2 as ( select c_int,c_boolean from v1_n12  where value = '1' or dt = '14')
+select count(*) from q1 join q2 join v4_n2 on q1.c_int = q2.c_int and 
v4_n2.c_int = q2.c_int;
 
 
-drop view v1;
-drop view v2;
-drop view v3;
-drop view v4;
+drop view v1_n12;
+drop view v2_n6;
+drop view v3_n2;
+drop view v4_n2;
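
Hive views are purely logical: the view text is expanded into the referencing query at compile time, so a view defined over a CTE, such as v4_n2 above, behaves as if the CTE were inlined. What the optimizer effectively sees for "select * from v4_n2" (sketch):

  with q1 as (select key, c_int from cbo_t1 where key = '1')
  select * from q1;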

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/char_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/char_2.q 
b/ql/src/test/queries/clientpositive/char_2.q
index e50aa14..ac743b3 100644
--- a/ql/src/test/queries/clientpositive/char_2.q
+++ b/ql/src/test/queries/clientpositive/char_2.q
@@ -1,12 +1,12 @@
 --! qt:dataset:src
-drop table char_2;
+drop table char_2_n1;
 
-create table char_2 (
+create table char_2_n1 (
   key char(10),
   value char(20)
 );
 
-insert overwrite table char_2 select * from src;
+insert overwrite table char_2_n1 select * from src;
 
 select value, sum(cast(key as int)), count(*) numrows
 from src
@@ -16,7 +16,7 @@ limit 5;
 
 -- should match the query from src
 select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n1
 group by value
 order by value asc
 limit 5;
@@ -29,9 +29,9 @@ limit 5;
 
 -- should match the query from src
 select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n1
 group by value
 order by value desc
 limit 5;
 
-drop table char_2;
+drop table char_2_n1;
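
The point of char_2.q is that char(n) is fixed-length and blank-padded, but the padding is insignificant in comparisons and grouping, which is why the aggregates over char_2_n1 are expected to match those over the string-typed src. A quick sketch of that semantics:

  -- both sides compare as 'val_0'; the pad spaces are ignored
  select cast('val_0' as char(10)) = cast('val_0' as char(20));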

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/char_join1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/char_join1.q 
b/ql/src/test/queries/clientpositive/char_join1.q
index b8699e3..2502061 100644
--- a/ql/src/test/queries/clientpositive/char_join1.q
+++ b/ql/src/test/queries/clientpositive/char_join1.q
@@ -2,7 +2,7 @@
 
 drop table char_join1_ch1;
 drop table char_join1_ch2;
-drop table char_join1_str;
+drop table char_join1_str_n0;
 
 create table  char_join1_ch1 (
   c1 int,
@@ -14,14 +14,14 @@ create table  char_join1_ch2 (
   c2 char(20)
 );
 
-create table  char_join1_str (
+create table  char_join1_str_n0 (
   c1 int,
   c2 string
 );
 
 load data local inpath '../../data/files/vc1.txt' into table char_join1_ch1;
 load data local inpath '../../data/files/vc1.txt' into table char_join1_ch2;
-load data local inpath '../../data/files/vc1.txt' into table char_join1_str;
+load data local inpath '../../data/files/vc1.txt' into table char_join1_str_n0;
 
 -- Join char with same length char
 select * from char_join1_ch1 a join char_join1_ch1 b on (a.c2 = b.c2);
@@ -30,8 +30,8 @@ select * from char_join1_ch1 a join char_join1_ch1 b on (a.c2 
= b.c2);
 select * from char_join1_ch1 a join char_join1_ch2 b on (a.c2 = b.c2);
 
 -- Join char with string
-select * from char_join1_ch1 a join char_join1_str b on (a.c2 = b.c2);
+select * from char_join1_ch1 a join char_join1_str_n0 b on (a.c2 = b.c2);
 
 drop table char_join1_ch1;
 drop table char_join1_ch2;
-drop table char_join1_str;
+drop table char_join1_str_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/char_pad_convert.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/char_pad_convert.q 
b/ql/src/test/queries/clientpositive/char_pad_convert.q
index a65132b..b3e0a28 100644
--- a/ql/src/test/queries/clientpositive/char_pad_convert.q
+++ b/ql/src/test/queries/clientpositive/char_pad_convert.q
@@ -1,5 +1,5 @@
 
-create table over1k(
+create table over1k_n6(
            t tinyint,
            si smallint,
            i int,
@@ -14,7 +14,7 @@ create table over1k(
        row format delimited
        fields terminated by '|';
 
-load data local inpath '../../data/files/over1k' into table over1k;
+load data local inpath '../../data/files/over1k' into table over1k_n6;
 
 -- Pass non-strings for the first and third arguments to test argument 
conversion
 
@@ -22,23 +22,23 @@ load data local inpath '../../data/files/over1k' into table 
over1k;
 select lpad(t, 4, ' '),
        lpad(si, 2, ' '),
        lpad(i, 9, 'z'),
-       lpad(b, 2, 'a') from over1k limit 5;
+       lpad(b, 2, 'a') from over1k_n6 limit 5;
 
 select lpad("oh", 10, t),
        lpad("my", 6, si),
        lpad("other", 14, i),
-       lpad("one", 12, b) from over1k limit 5;
+       lpad("one", 12, b) from over1k_n6 limit 5;
 
 -- Integers
 select rpad(t, 4, ' '),
        rpad(si, 2, ' '),
        rpad(i, 9, 'z'),
-       rpad(b, 2, 'a') from over1k limit 5;
+       rpad(b, 2, 'a') from over1k_n6 limit 5;
 
 select rpad("oh", 10, t),
        rpad("my", 6, si),
        rpad("other", 14, i),
-       rpad("one", 12, b) from over1k limit 5;
+       rpad("one", 12, b) from over1k_n6 limit 5;
 
 -- More
 select lpad(f, 4, ' '),
@@ -46,25 +46,25 @@ select lpad(f, 4, ' '),
        lpad(bo, 9, 'z'),
        lpad(ts, 2, 'a'),
        lpad(`dec`, 7, 'd'),
-       lpad(bin, 8, 'b') from over1k limit 5;
+       lpad(bin, 8, 'b') from over1k_n6 limit 5;
 
 select lpad("oh", 10, f),
        lpad("my", 6, d),
        lpad("other", 14, bo),
        lpad("one", 12, ts),
        lpad("two", 7, `dec`),
-       lpad("three", 8, bin) from over1k limit 5;
+       lpad("three", 8, bin) from over1k_n6 limit 5;
 
 select rpad(f, 4, ' '),
        rpad(d, 2, ' '),
        rpad(bo, 9, 'z'),
        rpad(ts, 2, 'a'),
        rpad(`dec`, 7, 'd'),
-       rpad(bin, 8, 'b') from over1k limit 5;
+       rpad(bin, 8, 'b') from over1k_n6 limit 5;
 
 select rpad("oh", 10, f),
        rpad("my", 6, d),
        rpad("other", 14, bo),
        rpad("one", 12, ts),
        rpad("two", 7, `dec`),
-       rpad("three", 8, bin) from over1k limit 5;
+       rpad("three", 8, bin) from over1k_n6 limit 5;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/check_constraint.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/check_constraint.q 
b/ql/src/test/queries/clientpositive/check_constraint.q
index 5671b3e..8c3831b 100644
--- a/ql/src/test/queries/clientpositive/check_constraint.q
+++ b/ql/src/test/queries/clientpositive/check_constraint.q
@@ -2,27 +2,27 @@ set hive.stats.autogather=false;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-CREATE TABLE table1(i int CHECK -i > -10,
+CREATE TABLE table1_n0(i int CHECK -i > -10,
     j int CHECK +j > 10,
     ij boolean CHECK ij IS NOT NULL,
     a int CHECK a BETWEEN i AND j,
     bb float CHECK bb IN (23.4,56,4),
     d bigint CHECK d > round(567.6) AND d < round(1000.4))
     clustered by (i) into 2 buckets stored as orc 
TBLPROPERTIES('transactional'='true');
-DESC FORMATTED table1;
+DESC FORMATTED table1_n0;
 
-EXPLAIN INSERT INTO table1 values(1,100,true, 5, 23.4, 700.5);
-INSERT INTO table1 values(1,100,true, 5, 23.4, 700.5);
-SELECT * from table1;
-DROP TABLE table1;
+EXPLAIN INSERT INTO table1_n0 values(1,100,true, 5, 23.4, 700.5);
+INSERT INTO table1_n0 values(1,100,true, 5, 23.4, 700.5);
+SELECT * from table1_n0;
+DROP TABLE table1_n0;
 
 -- null check constraint
-CREATE TABLE table2(i int CHECK i + NULL > 0);
-DESC FORMATTED table2;
-EXPLAIN INSERT INTO table2 values(8);
-INSERT INTO table2 values(8);
-select * from table2;
-Drop table table2;
+CREATE TABLE table2_n0(i int CHECK i + NULL > 0);
+DESC FORMATTED table2_n0;
+EXPLAIN INSERT INTO table2_n0 values(8);
+INSERT INTO table2_n0 values(8);
+select * from table2_n0;
+Drop table table2_n0;
 
 -- UDF created by users
 CREATE FUNCTION test_udf2 AS 
'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestGetJavaString';
@@ -79,57 +79,57 @@ SELECT * from texpr;
 DROP TABLE texpr;
 
 -- UPDATE
-create table acid_uami(i int,
+create table acid_uami_n0(i int,
                  de decimal(5,2) constraint nn1 not null enforced,
                  vc varchar(128) constraint ch2 CHECK de >= cast(i as 
decimal(5,2)) enforced)
                  clustered by (i) into 2 buckets stored as orc TBLPROPERTIES 
('transactional'='true');
-DESC FORMATTED acid_uami;
+DESC FORMATTED acid_uami_n0;
 
 --! qt:dataset:src
 -- insert as select
-explain insert into table acid_uami select cast(key as int), cast (key as 
decimal(5,2)), value from src;
-insert into table acid_uami select cast(key as int), cast (key as 
decimal(5,2)), value from src;
+explain insert into table acid_uami_n0 select cast(key as int), cast (key as 
decimal(5,2)), value from src;
+insert into table acid_uami_n0 select cast(key as int), cast (key as 
decimal(5,2)), value from src;
 
 -- insert overwrite
-explain insert overwrite table acid_uami select cast(key as int), cast (key as 
decimal(5,2)), value
+explain insert overwrite table acid_uami_n0 select cast(key as int), cast (key 
as decimal(5,2)), value
     from src order by cast(key as int) limit 10 ;
-insert overwrite table acid_uami select cast(key as int), cast (key as 
decimal(5,2)), value
+insert overwrite table acid_uami_n0 select cast(key as int), cast (key as 
decimal(5,2)), value
     from src order by cast(key as int) limit 10 ;
 
 -- insert as select cont
-explain insert into table acid_uami select cast(s1.key as int) as c1, cast 
(s2.key as decimal(5,2)) as c2, s1.value from src s1
+explain insert into table acid_uami_n0 select cast(s1.key as int) as c1, cast 
(s2.key as decimal(5,2)) as c2, s1.value from src s1
     left outer join src s2 on s1.key=s2.key where s1.value > 'val' limit 10 ;
-insert into table acid_uami select cast(s1.key as int) as c1, cast (s2.key as 
decimal(5,2)) as c2, s1.value from src s1
+insert into table acid_uami_n0 select cast(s1.key as int) as c1, cast (s2.key 
as decimal(5,2)) as c2, s1.value from src s1
     left outer join src s2 on s1.key=s2.key where s1.value > 'val' limit 10 ;
-select * from acid_uami;
-truncate table acid_uami;
+select * from acid_uami_n0;
+truncate table acid_uami_n0;
 
 -- insert as select group by + agg
-explain insert into table acid_uami select min(cast(key as int)) as c1, 
max(cast (key as decimal(5,2))) as c2, value
+explain insert into table acid_uami_n0 select min(cast(key as int)) as c1, 
max(cast (key as decimal(5,2))) as c2, value
     from src group by key, value order by key, value limit 10;
-insert into table acid_uami select min(cast(key as int)) as c1, max(cast (key 
as decimal(5,2))) as c2, value
+insert into table acid_uami_n0 select min(cast(key as int)) as c1, max(cast 
(key as decimal(5,2))) as c2, value
     from src group by key, value order by key, value limit 10;
-select * from acid_uami;
-truncate table acid_uami;
+select * from acid_uami_n0;
+truncate table acid_uami_n0;
 
 -- multi insert
-create table src_multi2 (i STRING, j STRING NOT NULL ENABLE);
+create table src_multi2_n0 (i STRING, j STRING NOT NULL ENABLE);
 explain
 from src
-insert into table acid_uami select cast(key as int), cast(key as 
decimal(5,2)), value where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
-drop table src_multi2;
+insert into table acid_uami_n0 select cast(key as int), cast(key as 
decimal(5,2)), value where key < 10
+insert overwrite table src_multi2_n0 select * where key > 10 and key < 20;
+drop table src_multi2_n0;
 
 -- update
-select * from acid_uami order by de desc limit 15;
-explain update acid_uami set de = 893.14 where de = 103.00 or de = 119.00;
-update acid_uami set de = 893.14 where de = 103.00 or de = 119.00;
-select * from acid_uami order by de desc limit 15;
-ALTER table acid_uami drop constraint ch2;
-explain update acid_uami set vc = 'apache_hive' where de = 893.14 ;
-update acid_uami set vc = 'apache_hive' where de = 893.14 ;
-select * from acid_uami order by vc limit 15;
-DROP TABLE acid_uami;
+select * from acid_uami_n0 order by de desc limit 15;
+explain update acid_uami_n0 set de = 893.14 where de = 103.00 or de = 119.00;
+update acid_uami_n0 set de = 893.14 where de = 103.00 or de = 119.00;
+select * from acid_uami_n0 order by de desc limit 15;
+ALTER table acid_uami_n0 drop constraint ch2;
+explain update acid_uami_n0 set vc = 'apache_hive' where de = 893.14 ;
+update acid_uami_n0 set vc = 'apache_hive' where de = 893.14 ;
+select * from acid_uami_n0 order by vc limit 15;
+DROP TABLE acid_uami_n0;
 
 -- MERGE
 create table tmerge(key int CHECK key > 0 AND (key < 100 OR key = 5) enable, 
a1 string NOT NULL, value string)
@@ -202,12 +202,12 @@ DESC FORMATTED trely;
 DROP TABLE trely;
 
 -- table level constraint
-create table tbl1(a string, b int, CONSTRAINT check1 CHECK a != '' AND b > 4);
-desc formatted tbl1;
-explain insert into tbl1 values('a', 69);
-insert into tbl1 values('a', 69);
-select * from tbl1;
-ALTER TABLE tbl1 add constraint chk2 CHECK (b < 100);
-desc formatted tbl1;
-explain insert into tbl1 values('a', 69);
-drop table tbl1;
+create table tbl1_n1(a string, b int, CONSTRAINT check1 CHECK a != '' AND b > 
4);
+desc formatted tbl1_n1;
+explain insert into tbl1_n1 values('a', 69);
+insert into tbl1_n1 values('a', 69);
+select * from tbl1_n1;
+ALTER TABLE tbl1_n1 add constraint chk2 CHECK (b < 100);
+desc formatted tbl1_n1;
+explain insert into tbl1_n1 values('a', 69);
+drop table tbl1_n1;
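
Enabled CHECK constraints are enforced at write time: Hive compiles the predicate into the INSERT/UPDATE plan (which is what the EXPLAIN statements above make visible), and a row that fails the predicate aborts the statement. A minimal sketch with a hypothetical table:

  create table chk_demo (age int check (age between 0 and 150) enable);
  insert into chk_demo values (42);    -- satisfies the check
  -- insert into chk_demo values (200); would fail enforcement and abort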

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/colstats_all_nulls.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/colstats_all_nulls.q 
b/ql/src/test/queries/clientpositive/colstats_all_nulls.q
index ae6b0f5..22a9e71 100644
--- a/ql/src/test/queries/clientpositive/colstats_all_nulls.q
+++ b/ql/src/test/queries/clientpositive/colstats_all_nulls.q
@@ -1,11 +1,11 @@
-CREATE TABLE src_null(a bigint) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null;
+CREATE TABLE src_null_n2(a bigint) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null_n2;
 
-create table all_nulls as SELECT a, cast(a as double) as b, cast(a as decimal) 
as c  FROM src_null where a is null limit 5;
+create table all_nulls as SELECT a, cast(a as double) as b, cast(a as decimal) 
as c  FROM src_null_n2 where a is null limit 5;
 analyze table all_nulls compute statistics for columns;
 
 describe formatted all_nulls a;
 describe formatted all_nulls b;
 
 drop table all_nulls;
-drop table src_null;
+drop table src_null_n2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/columnStatsUpdateForStatsOptimizer_1.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/columnStatsUpdateForStatsOptimizer_1.q 
b/ql/src/test/queries/clientpositive/columnStatsUpdateForStatsOptimizer_1.q
index a11e7f0..4c37914 100644
--- a/ql/src/test/queries/clientpositive/columnStatsUpdateForStatsOptimizer_1.q
+++ b/ql/src/test/queries/clientpositive/columnStatsUpdateForStatsOptimizer_1.q
@@ -3,65 +3,65 @@ set hive.stats.fetch.column.stats=true;
 set hive.compute.query.using.stats=true; 
 set hive.mapred.mode=nonstrict;
 
-drop table calendar;
+drop table calendar_n0;
 
-CREATE TABLE calendar (year int, month int);
+CREATE TABLE calendar_n0 (year int, month int);
 
-insert into calendar values (2010, 10), (2011, 11), (2012, 12); 
+insert into calendar_n0 values (2010, 10), (2011, 11), (2012, 12); 
 
-desc formatted calendar;
+desc formatted calendar_n0;
 
-analyze table calendar compute statistics;
+analyze table calendar_n0 compute statistics;
 
-desc formatted calendar;
+desc formatted calendar_n0;
 
-explain select count(1) from calendar; 
+explain select count(1) from calendar_n0; 
 
-explain select max(year) from calendar; 
+explain select max(year) from calendar_n0; 
 
-select max(year) from calendar; 
+select max(year) from calendar_n0; 
 
-select max(month) from calendar;
+select max(month) from calendar_n0;
 
-analyze table calendar compute statistics for columns;
+analyze table calendar_n0 compute statistics for columns;
 
-desc formatted calendar;
+desc formatted calendar_n0;
 
-explain select max(year) from calendar; 
+explain select max(year) from calendar_n0; 
 
-select max(year) from calendar;
+select max(year) from calendar_n0;
 
-insert into calendar values (2015, 15);
+insert into calendar_n0 values (2015, 15);
 
-desc formatted calendar;
+desc formatted calendar_n0;
 
-explain select max(year) from calendar; 
+explain select max(year) from calendar_n0; 
 
-select max(year) from calendar;
+select max(year) from calendar_n0;
 
-explain select max(month) from calendar; 
+explain select max(month) from calendar_n0; 
 
-select max(month) from calendar;
+select max(month) from calendar_n0;
 
-analyze table calendar compute statistics for columns year;
+analyze table calendar_n0 compute statistics for columns year;
 
-desc formatted calendar;
+desc formatted calendar_n0;
 
-explain select max(year) from calendar; 
+explain select max(year) from calendar_n0; 
 
-select max(year) from calendar;
+select max(year) from calendar_n0;
 
-explain select max(month) from calendar; 
+explain select max(month) from calendar_n0; 
 
-select max(month) from calendar;
+select max(month) from calendar_n0;
 
-analyze table calendar compute statistics for columns month;
+analyze table calendar_n0 compute statistics for columns month;
 
-desc formatted calendar;
+desc formatted calendar_n0;
 
-explain select max(month) from calendar; 
+explain select max(month) from calendar_n0; 
 
-select max(month) from calendar;
+select max(month) from calendar_n0;
 
 CREATE TABLE calendarp (`year` int)  partitioned by (p int);
 
@@ -87,15 +87,15 @@ explain select max(year) from calendarp where p=1;
 
 select max(year) from calendarp where p=1;
 
-create table t (key string, value string);
+create table t_n31 (key string, value string);
 
-load data local inpath '../../data/files/kv1.txt' into table t;
+load data local inpath '../../data/files/kv1.txt' into table t_n31;
 
-desc formatted t;
+desc formatted t_n31;
 
-analyze table t compute statistics;
+analyze table t_n31 compute statistics;
 
-desc formatted t;
+desc formatted t_n31;
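
The pattern throughout this file, ANALYZE followed by EXPLAIN followed by the query, exists because hive.compute.query.using.stats=true lets simple aggregates (count, min, max) be answered from metastore column statistics instead of a scan, and an intervening INSERT invalidates that shortcut until the stats are recomputed. A sketch of the check, reusing calendar_n0 from above:

  analyze table calendar_n0 compute statistics for columns year;
  explain select max(year) from calendar_n0;
  -- with fresh stats the plan is a metadata-only fetch, no table scan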
 
 
 
