http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_map_null.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_map_null.q b/ql/src/test/queries/clientpositive/parquet_map_null.q
index 810532d..7f84100 100644
--- a/ql/src/test/queries/clientpositive/parquet_map_null.q
+++ b/ql/src/test/queries/clientpositive/parquet_map_null.q
@@ -2,14 +2,14 @@ set hive.vectorized.execution.enabled=false;
 
 -- This test attempts to write a parquet table from an avro table that contains map null values
 
-DROP TABLE IF EXISTS avro_table;
+DROP TABLE IF EXISTS avro_table_n0;
 DROP TABLE IF EXISTS parquet_table;
 
-CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO;
-LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table;
+CREATE TABLE avro_table_n0 (avreau_col_1 map<string,string>) STORED AS AVRO;
+LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table_n0;
 
-CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table;
+CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table_n0;
 SELECT * FROM parquet_table;
 
-DROP TABLE avro_table;
+DROP TABLE avro_table_n0;
 DROP TABLE parquet_table;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_nested_complex.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_nested_complex.q b/ql/src/test/queries/clientpositive/parquet_nested_complex.q
index 717e16f..9688e16 100644
--- a/ql/src/test/queries/clientpositive/parquet_nested_complex.q
+++ b/ql/src/test/queries/clientpositive/parquet_nested_complex.q
@@ -2,9 +2,9 @@
 set hive.vectorized.execution.enabled=false;
 set hive.test.vectorized.execution.enabled.override=none;
 
--- start with the original nestedcomplex test
+-- start with the original nestedcomplex_n0 test
 
-create table nestedcomplex (
+create table nestedcomplex_n0 (
 simple_int int,
 max_nested_array  array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<int>>>>>>>>>>>>>>>>>>>>>>>,
 max_nested_map    array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<map<string,string>>>>>>>>>>>>>>>>>>>>>>,
@@ -18,16 +18,16 @@ WITH SERDEPROPERTIES (
 )
 ;
 
-describe nestedcomplex;
-describe extended nestedcomplex;
+describe nestedcomplex_n0;
+describe extended nestedcomplex_n0;
 
-load data local inpath '../../data/files/nested_complex.txt' overwrite into table nestedcomplex;
+load data local inpath '../../data/files/nested_complex.txt' overwrite into table nestedcomplex_n0;
 
 -- and load the table into Parquet
 
-CREATE TABLE parquet_nested_complex STORED AS PARQUET AS SELECT * FROM nestedcomplex;
+CREATE TABLE parquet_nested_complex STORED AS PARQUET AS SELECT * FROM nestedcomplex_n0;
 
 SELECT * FROM parquet_nested_complex SORT BY simple_int;
 
-DROP TABLE nestedcomplex;
+DROP TABLE nestedcomplex_n0;
 DROP TABLE parquet_nested_complex;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_ppd_char.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_char.q b/ql/src/test/queries/clientpositive/parquet_ppd_char.q
index 31ee693..386fb25 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_char.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_char.q
@@ -7,74 +7,74 @@ SET hive.optimize.ppd=true;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
 
-create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
+create table newtypestbl_n3(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
 
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
+insert overwrite table newtypestbl_n3 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
 
 set hive.optimize.index.filter=false;
 
 -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select * from newtypestbl where c="apple";
+select * from newtypestbl_n3 where c="apple";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c="apple";
+select * from newtypestbl_n3 where c="apple";
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c!="apple";
+select * from newtypestbl_n3 where c!="apple";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c!="apple";
+select * from newtypestbl_n3 where c!="apple";
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c<"hello";
+select * from newtypestbl_n3 where c<"hello";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c<"hello";
+select * from newtypestbl_n3 where c<"hello";
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c<="hello" sort by c;
+select * from newtypestbl_n3 where c<="hello" sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c<="hello" sort by c;
+select * from newtypestbl_n3 where c<="hello" sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c="apple ";
+select * from newtypestbl_n3 where c="apple ";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c="apple ";
+select * from newtypestbl_n3 where c="apple ";
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c in ("apple", "carrot");
+select * from newtypestbl_n3 where c in ("apple", "carrot");
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c in ("apple", "carrot");
+select * from newtypestbl_n3 where c in ("apple", "carrot");
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c in ("apple", "hello") sort by c;
+select * from newtypestbl_n3 where c in ("apple", "hello") sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c in ("apple", "hello") sort by c;
+select * from newtypestbl_n3 where c in ("apple", "hello") sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c in ("carrot");
+select * from newtypestbl_n3 where c in ("carrot");
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c in ("carrot");
+select * from newtypestbl_n3 where c in ("carrot");
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c between "apple" and "carrot";
+select * from newtypestbl_n3 where c between "apple" and "carrot";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c between "apple" and "carrot";
+select * from newtypestbl_n3 where c between "apple" and "carrot";
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c between "apple" and "zombie" sort by c;
+select * from newtypestbl_n3 where c between "apple" and "zombie" sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c between "apple" and "zombie" sort by c;
+select * from newtypestbl_n3 where c between "apple" and "zombie" sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c between "carrot" and "carrot1";
+select * from newtypestbl_n3 where c between "carrot" and "carrot1";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c between "carrot" and "carrot1";
\ No newline at end of file
+select * from newtypestbl_n3 where c between "carrot" and "carrot1";
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_ppd_date.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_date.q b/ql/src/test/queries/clientpositive/parquet_ppd_date.q
index ebc9f41..82085be 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_date.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_date.q
@@ -7,99 +7,99 @@ SET hive.optimize.ppd=true;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
 
-create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
+create table newtypestbl_n2(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
 
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
+insert overwrite table newtypestbl_n2 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
 
 -- date data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select * from newtypestbl where da='1970-02-20';
+select * from newtypestbl_n2 where da='1970-02-20';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da='1970-02-20';
+select * from newtypestbl_n2 where da='1970-02-20';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da= date '1970-02-20';
+select * from newtypestbl_n2 where da= date '1970-02-20';
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da=cast('1970-02-20' as date);
+select * from newtypestbl_n2 where da=cast('1970-02-20' as date);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da=cast('1970-02-20' as date);
+select * from newtypestbl_n2 where da=cast('1970-02-20' as date);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da=cast('1970-02-20' as varchar(20));
+select * from newtypestbl_n2 where da=cast('1970-02-20' as varchar(20));
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da=cast('1970-02-20' as varchar(20));
+select * from newtypestbl_n2 where da=cast('1970-02-20' as varchar(20));
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da!='1970-02-20';
+select * from newtypestbl_n2 where da!='1970-02-20';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da!='1970-02-20';
+select * from newtypestbl_n2 where da!='1970-02-20';
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da<'1970-02-27';
+select * from newtypestbl_n2 where da<'1970-02-27';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da<'1970-02-27';
+select * from newtypestbl_n2 where da<'1970-02-27';
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da<'1970-02-29' sort by c;
+select * from newtypestbl_n2 where da<'1970-02-29' sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da<'1970-02-29' sort by c;
+select * from newtypestbl_n2 where da<'1970-02-29' sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da<'1970-02-15';
+select * from newtypestbl_n2 where da<'1970-02-15';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da<'1970-02-15';
+select * from newtypestbl_n2 where da<'1970-02-15';
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da<='1970-02-20';
+select * from newtypestbl_n2 where da<='1970-02-20';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da<='1970-02-20';
+select * from newtypestbl_n2 where da<='1970-02-20';
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da<='1970-02-27' sort by c;
+select * from newtypestbl_n2 where da<='1970-02-27' sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da<='1970-02-27' sort by c;
+select * from newtypestbl_n2 where da<='1970-02-27' sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
+select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
+select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c;
+select * from newtypestbl_n2 where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c;
+select * from newtypestbl_n2 where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
+select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
+select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da between '1970-02-19' and '1970-02-22';
+select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-22';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da between '1970-02-19' and '1970-02-22';
+select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-22';
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da between '1970-02-19' and '1970-02-28' sort by c;
+select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-28' sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da between '1970-02-19' and '1970-02-28' sort by c;
+select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-28' sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da between '1970-02-18' and '1970-02-19';
+select * from newtypestbl_n2 where da between '1970-02-18' and '1970-02-19';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da between '1970-02-18' and '1970-02-19';
+select * from newtypestbl_n2 where da between '1970-02-18' and '1970-02-19';

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q b/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q
index 747c911..e8e118d 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q
@@ -8,162 +8,162 @@ SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
 set hive.llap.cache.allow.synthetic.fileid=true;
 
-create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
+create table newtypestbl_n5(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
 
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
+insert overwrite table newtypestbl_n5 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
 
 -- decimal data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select * from newtypestbl where d=0.22;
+select * from newtypestbl_n5 where d=0.22;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d=0.22;
+select * from newtypestbl_n5 where d=0.22;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d='0.22';
+select * from newtypestbl_n5 where d='0.22';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d='0.22';
+select * from newtypestbl_n5 where d='0.22';
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d=cast('0.22' as float);
+select * from newtypestbl_n5 where d=cast('0.22' as float);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d=cast('0.22' as float);
+select * from newtypestbl_n5 where d=cast('0.22' as float);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d!=0.22;
+select * from newtypestbl_n5 where d!=0.22;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d!=0.22;
+select * from newtypestbl_n5 where d!=0.22;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d!='0.22';
+select * from newtypestbl_n5 where d!='0.22';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d!='0.22';
+select * from newtypestbl_n5 where d!='0.22';
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d!=cast('0.22' as float);
+select * from newtypestbl_n5 where d!=cast('0.22' as float);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d!=cast('0.22' as float);
+select * from newtypestbl_n5 where d!=cast('0.22' as float);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d<11.22;
+select * from newtypestbl_n5 where d<11.22;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d<11.22;
+select * from newtypestbl_n5 where d<11.22;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d<'11.22';
+select * from newtypestbl_n5 where d<'11.22';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d<'11.22';
+select * from newtypestbl_n5 where d<'11.22';
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d<cast('11.22' as float);
+select * from newtypestbl_n5 where d<cast('11.22' as float);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d<cast('11.22' as float);
+select * from newtypestbl_n5 where d<cast('11.22' as float);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d<1;
+select * from newtypestbl_n5 where d<1;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d<1;
+select * from newtypestbl_n5 where d<1;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d<=11.22 sort by c;
+select * from newtypestbl_n5 where d<=11.22 sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d<=11.22 sort by c;
+select * from newtypestbl_n5 where d<=11.22 sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d<='11.22' sort by c;
+select * from newtypestbl_n5 where d<='11.22' sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d<='11.22' sort by c;
+select * from newtypestbl_n5 where d<='11.22' sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d<=cast('11.22' as float) sort by c;
+select * from newtypestbl_n5 where d<=cast('11.22' as float) sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d<=cast('11.22' as float) sort by c;
+select * from newtypestbl_n5 where d<=cast('11.22' as float) sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d<=cast('11.22' as decimal);
+select * from newtypestbl_n5 where d<=cast('11.22' as decimal);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d<=cast('11.22' as decimal);
+select * from newtypestbl_n5 where d<=cast('11.22' as decimal);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d<=11.22BD sort by c;
+select * from newtypestbl_n5 where d<=11.22BD sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d<=11.22BD sort by c;
+select * from newtypestbl_n5 where d<=11.22BD sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d<=12 sort by c;
+select * from newtypestbl_n5 where d<=12 sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d<=12 sort by c;
+select * from newtypestbl_n5 where d<=12 sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d in ('0.22', '1.0');
+select * from newtypestbl_n5 where d in ('0.22', '1.0');
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d in ('0.22', '1.0');
+select * from newtypestbl_n5 where d in ('0.22', '1.0');
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d in ('0.22', '11.22') sort by c;
+select * from newtypestbl_n5 where d in ('0.22', '11.22') sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d in ('0.22', '11.22') sort by c;
+select * from newtypestbl_n5 where d in ('0.22', '11.22') sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d in ('0.9', '1.0');
+select * from newtypestbl_n5 where d in ('0.9', '1.0');
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d in ('0.9', '1.0');
+select * from newtypestbl_n5 where d in ('0.9', '1.0');
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d in ('0.9', 0.22);
+select * from newtypestbl_n5 where d in ('0.9', 0.22);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d in ('0.9', 0.22);
+select * from newtypestbl_n5 where d in ('0.9', 0.22);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float)) sort by c;
+select * from newtypestbl_n5 where d in ('0.9', 0.22, cast('11.22' as float)) sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d in ('0.9', 0.22, cast('11.22' as float)) sort by c;
+select * from newtypestbl_n5 where d in ('0.9', 0.22, cast('11.22' as float)) sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d between 0 and 1;
+select * from newtypestbl_n5 where d between 0 and 1;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d between 0 and 1;
+select * from newtypestbl_n5 where d between 0 and 1;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d between 0 and 1000 sort by c;
+select * from newtypestbl_n5 where d between 0 and 1000 sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d between 0 and 1000 sort by c;
+select * from newtypestbl_n5 where d between 0 and 1000 sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d between 0 and '2.0';
+select * from newtypestbl_n5 where d between 0 and '2.0';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d between 0 and '2.0';
+select * from newtypestbl_n5 where d between 0 and '2.0';
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d between 0 and cast(3 as float);
+select * from newtypestbl_n5 where d between 0 and cast(3 as float);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d between 0 and cast(3 as float);
+select * from newtypestbl_n5 where d between 0 and cast(3 as float);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d between 1 and cast(30 as char(10));
+select * from newtypestbl_n5 where d between 1 and cast(30 as char(10));
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d between 1 and cast(30 as char(10));
+select * from newtypestbl_n5 where d between 1 and cast(30 as char(10));

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_ppd_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_partition.q b/ql/src/test/queries/clientpositive/parquet_ppd_partition.q
index 90672ec..0a80ec5 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_partition.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_partition.q
@@ -3,8 +3,8 @@ SET hive.optimize.index.filter=true;
 SET hive.optimize.ppd=true;
 
 -- Test predicate with partitioned columns
-CREATE TABLE part1 (id int, content string) PARTITIONED BY (p string) STORED AS PARQUET;
-ALTER TABLE part1 ADD PARTITION (p='p1');
-INSERT INTO TABLE part1 PARTITION (p='p1') VALUES (1, 'a'), (2, 'b');
-SELECT * FROM part1 WHERE p='p1';
-DROP TABLE part1 PURGE;
\ No newline at end of file
+CREATE TABLE part1_n1 (id int, content string) PARTITIONED BY (p string) STORED AS PARQUET;
+ALTER TABLE part1_n1 ADD PARTITION (p='p1');
+INSERT INTO TABLE part1_n1 PARTITION (p='p1') VALUES (1, 'a'), (2, 'b');
+SELECT * FROM part1_n1 WHERE p='p1';
+DROP TABLE part1_n1 PURGE;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_ppd_timestamp.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_timestamp.q b/ql/src/test/queries/clientpositive/parquet_ppd_timestamp.q
index dcb04a1..0d9df1f 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_timestamp.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_timestamp.q
@@ -7,96 +7,96 @@ SET hive.optimize.ppd=true;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
 
-create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), ts timestamp) stored as parquet;
+create table newtypestbl_n4(c char(10), v varchar(10), d decimal(5,3), ts timestamp) stored as parquet;
 
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2 limit 10) uniontbl;
+insert overwrite table newtypestbl_n4 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2 limit 10) uniontbl;
 
 -- timestamp data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01';
+select * from newtypestbl_n4 where cast(ts as string)='2011-01-01 01:01:01';
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01';
+select * from newtypestbl_n4 where cast(ts as string)='2011-01-01 01:01:01';
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts=cast('2011-01-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts=cast('2011-01-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20));
+select * from newtypestbl_n4 where ts=cast('2011-01-01 01:01:01' as varchar(20));
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts=cast('2011-01-01 01:01:01' as varchar(20));
+select * from newtypestbl_n4 where ts=cast('2011-01-01 01:01:01' as varchar(20));
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts!=cast('2011-01-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts!=cast('2011-01-01 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts!=cast('2011-01-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts<cast('2011-01-20 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts<cast('2011-01-20 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts<cast('2011-01-20 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp) sort by c;
+select * from newtypestbl_n4 where ts<cast('2011-01-22 01:01:01' as timestamp) sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts<cast('2011-01-22 01:01:01' as timestamp) sort by c;
+select * from newtypestbl_n4 where ts<cast('2011-01-22 01:01:01' as timestamp) sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts<cast('2010-10-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts<cast('2010-10-01 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts<cast('2010-10-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts<=cast('2011-01-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts<=cast('2011-01-01 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts<=cast('2011-01-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp) sort by c;
+select * from newtypestbl_n4 where ts<=cast('2011-01-20 01:01:01' as timestamp) sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts<=cast('2011-01-20 01:01:01' as timestamp) sort by c;
+select * from newtypestbl_n4 where ts<=cast('2011-01-20 01:01:01' as timestamp) sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp));
+select * from newtypestbl_n4 where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp));
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp));
+select * from newtypestbl_n4 where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp));
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp)) sort by c;
+select * from newtypestbl_n4 where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp)) sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp)) sort by c;
+select * from newtypestbl_n4 where ts in (cast('2011-01-01 01:01:01' as timestamp), cast('2011-01-20 01:01:01' as timestamp)) sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp));
+select * from newtypestbl_n4 where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp));
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp));
+select * from newtypestbl_n4 where ts in (cast('2011-01-02 01:01:01' as timestamp), cast('2011-01-08 01:01:01' as timestamp));
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-08 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp) sort by c;
+select * from newtypestbl_n4 where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp) sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp) sort by c;
+select * from newtypestbl_n4 where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2011-01-25 01:01:01' as timestamp) sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp);
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp);
+select * from newtypestbl_n4 where ts between cast('2010-10-01 01:01:01' as timestamp) and cast('2010-11-01 01:01:01' as timestamp);

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_ppd_varchar.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_varchar.q b/ql/src/test/queries/clientpositive/parquet_ppd_varchar.q
index 926e721..68b05e4 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_varchar.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_varchar.q
@@ -7,74 +7,74 @@ SET hive.optimize.ppd=true;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
 
-create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
+create table newtypestbl_n0(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
 
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
+insert overwrite table newtypestbl_n0 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
 
 set hive.optimize.index.filter=false;
 
 -- varchar data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select * from newtypestbl where v="bee";
+select * from newtypestbl_n0 where v="bee";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where v="bee";
+select * from newtypestbl_n0 where v="bee";
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where v!="bee";
+select * from newtypestbl_n0 where v!="bee";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where v!="bee";
+select * from newtypestbl_n0 where v!="bee";
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where v<"world";
+select * from newtypestbl_n0 where v<"world";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where v<"world";
+select * from newtypestbl_n0 where v<"world";
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where v<="world" sort by c;
+select * from newtypestbl_n0 where v<="world" sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where v<="world" sort by c;
+select * from newtypestbl_n0 where v<="world" sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where v="bee   ";
+select * from newtypestbl_n0 where v="bee   ";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where v="bee   ";
+select * from newtypestbl_n0 where v="bee   ";
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where v in ("bee", "orange");
+select * from newtypestbl_n0 where v in ("bee", "orange");
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where v in ("bee", "orange");
+select * from newtypestbl_n0 where v in ("bee", "orange");
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where v in ("bee", "world") sort by c;
+select * from newtypestbl_n0 where v in ("bee", "world") sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where v in ("bee", "world") sort by c;
+select * from newtypestbl_n0 where v in ("bee", "world") sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where v in ("orange");
+select * from newtypestbl_n0 where v in ("orange");
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where v in ("orange");
+select * from newtypestbl_n0 where v in ("orange");
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where v between "bee" and "orange";
+select * from newtypestbl_n0 where v between "bee" and "orange";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where v between "bee" and "orange";
+select * from newtypestbl_n0 where v between "bee" and "orange";
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where v between "bee" and "zombie" sort by c;
+select * from newtypestbl_n0 where v between "bee" and "zombie" sort by c;
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where v between "bee" and "zombie" sort by c;
+select * from newtypestbl_n0 where v between "bee" and "zombie" sort by c;
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where v between "orange" and "pine";
+select * from newtypestbl_n0 where v between "orange" and "pine";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where v between "orange" and "pine";
\ No newline at end of file
+select * from newtypestbl_n0 where v between "orange" and "pine";
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_predicate_pushdown.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_predicate_pushdown.q b/ql/src/test/queries/clientpositive/parquet_predicate_pushdown.q
index bfa3715..1c5c1f3 100644
--- a/ql/src/test/queries/clientpositive/parquet_predicate_pushdown.q
+++ b/ql/src/test/queries/clientpositive/parquet_predicate_pushdown.q
@@ -17,7 +17,7 @@ CREATE TABLE tbl_pred(t tinyint,
            bin binary)
 STORED AS PARQUET;
 
-CREATE TABLE staging(t tinyint,
+CREATE TABLE staging_n0(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -31,9 +31,9 @@ CREATE TABLE staging(t tinyint,
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging_n0;
 
-INSERT INTO TABLE tbl_pred select * from staging;
+INSERT INTO TABLE tbl_pred select * from staging_n0;
 
 -- no predicate case. the explain plan should not have filter expression in table scan operator
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_table_with_subschema.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_table_with_subschema.q b/ql/src/test/queries/clientpositive/parquet_table_with_subschema.q
index f5d9b8e..1ec102e 100644
--- a/ql/src/test/queries/clientpositive/parquet_table_with_subschema.q
+++ b/ql/src/test/queries/clientpositive/parquet_table_with_subschema.q
@@ -1,15 +1,15 @@
 set hive.vectorized.execution.enabled=false;
 
 -- Sometimes, the user wants to create a table from just a portion of the file schema;
--- This test makes sure that this scenario works;
+-- This test_n6 makes sure that this scenario works;
 
-DROP TABLE test;
+DROP TABLE test_n6;
 
 -- Current file schema is: (id int, name string, address struct<number:int,street:string,zip:string>);
--- Creates a table from just a portion of the file schema, including struct elements (test lower/upper case as well)
-CREATE TABLE test (Name string, address struct<Zip:string,Street:string>) STORED AS PARQUET;
+-- Creates a table from just a portion of the file schema, including struct elements (test_n6 lower/upper case as well)
+CREATE TABLE test_n6 (Name string, address struct<Zip:string,Street:string>) STORED AS PARQUET;
 
-LOAD DATA LOCAL INPATH '../../data/files/HiveGroup.parquet' OVERWRITE INTO TABLE test;
-SELECT * FROM test;
+LOAD DATA LOCAL INPATH '../../data/files/HiveGroup.parquet' OVERWRITE INTO TABLE test_n6;
+SELECT * FROM test_n6;
 
-DROP TABLE test;
\ No newline at end of file
+DROP TABLE test_n6;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q b/ql/src/test/queries/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q
index e17d48b..0fded10 100644
--- a/ql/src/test/queries/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q
+++ b/ql/src/test/queries/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q
@@ -1,8 +1,8 @@
 set hive.vectorized.execution.enabled=false;
 set hive.mapred.mode=nonstrict;
 
-DROP TABLE parquet_types_staging;
-DROP TABLE parquet_types;
+DROP TABLE parquet_types_staging_n2;
+DROP TABLE parquet_types_n1;
 
 set hive.vectorized.execution.enabled=true;
 set hive.vectorized.execution.reduce.enabled=true;
@@ -10,7 +10,7 @@ set hive.vectorized.use.row.serde.deserialize=true;
 set hive.vectorized.use.vector.serde.deserialize=true;
 set hive.vectorized.execution.reduce.groupby.enabled = true;
 
-CREATE TABLE parquet_types_staging (
+CREATE TABLE parquet_types_staging_n2 (
   cint int,
   ctinyint tinyint,
   csmallint smallint,
@@ -30,7 +30,7 @@ FIELDS TERMINATED BY '|'
 COLLECTION ITEMS TERMINATED BY ','
 MAP KEYS TERMINATED BY ':';
 
-CREATE TABLE parquet_types (
+CREATE TABLE parquet_types_n1 (
   cint int,
   ctinyint tinyint,
   csmallint smallint,
@@ -48,13 +48,13 @@ CREATE TABLE parquet_types (
 ) STORED AS PARQUET;
 
 LOAD DATA LOCAL INPATH '../../data/files/parquet_non_dictionary_types.txt' OVERWRITE INTO TABLE
-parquet_types_staging;
+parquet_types_staging_n2;
 
-SELECT * FROM parquet_types_staging;
+SELECT * FROM parquet_types_staging_n2;
 
-INSERT OVERWRITE TABLE parquet_types
+INSERT OVERWRITE TABLE parquet_types_n1
 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
-unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging;
+unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging_n2;
 
 -- test types in group by
 
@@ -64,7 +64,7 @@ EXPLAIN SELECT ctinyint,
   COUNT(cstring1),
   ROUND(AVG(cfloat), 5),
   ROUND(STDDEV_POP(cdouble),5)
-FROM parquet_types
+FROM parquet_types_n1
 GROUP BY ctinyint
 ORDER BY ctinyint
 ;
@@ -75,22 +75,22 @@ SELECT ctinyint,
   COUNT(cstring1),
   ROUND(AVG(cfloat), 5),
   ROUND(STDDEV_POP(cdouble),5)
-FROM parquet_types
+FROM parquet_types_n1
 GROUP BY ctinyint
 ORDER BY ctinyint
 ;
 
-EXPLAIN SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat;
-SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat;
+EXPLAIN SELECT cfloat, count(*) FROM parquet_types_n1 GROUP BY cfloat ORDER BY cfloat;
+SELECT cfloat, count(*) FROM parquet_types_n1 GROUP BY cfloat ORDER BY cfloat;
 
-EXPLAIN SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar;
-SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar;
+EXPLAIN SELECT cchar, count(*) FROM parquet_types_n1 GROUP BY cchar ORDER BY cchar;
+SELECT cchar, count(*) FROM parquet_types_n1 GROUP BY cchar ORDER BY cchar;
 
-EXPLAIN SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar;
-SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar;
+EXPLAIN SELECT cvarchar, count(*) FROM parquet_types_n1 GROUP BY cvarchar ORDER BY cvarchar;
+SELECT cvarchar, count(*) FROM parquet_types_n1 GROUP BY cvarchar ORDER BY cvarchar;
 
-EXPLAIN SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1;
-SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1;
+EXPLAIN SELECT cstring1, count(*) FROM parquet_types_n1 GROUP BY cstring1 ORDER BY cstring1;
+SELECT cstring1, count(*) FROM parquet_types_n1 GROUP BY cstring1 ORDER BY cstring1;
 
-EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary;
-SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary;
\ No newline at end of file
+EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types_n1 GROUP BY cbinary;
+SELECT hex(cbinary), count(*) FROM parquet_types_n1 GROUP BY cbinary;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_types_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_types_vectorization.q b/ql/src/test/queries/clientpositive/parquet_types_vectorization.q
index 1f353ae..67a27e8 100644
--- a/ql/src/test/queries/clientpositive/parquet_types_vectorization.q
+++ b/ql/src/test/queries/clientpositive/parquet_types_vectorization.q
@@ -1,6 +1,6 @@
 set hive.mapred.mode=nonstrict;
-DROP TABLE parquet_types_staging;
-DROP TABLE parquet_types;
+DROP TABLE parquet_types_staging_n1;
+DROP TABLE parquet_types_n0;
 
 set hive.vectorized.execution.enabled=true;
 set hive.vectorized.execution.reduce.enabled=true;
@@ -9,7 +9,7 @@ set hive.vectorized.use.vector.serde.deserialize=true;
 set hive.vectorized.execution.reduce.groupby.enabled = true;
 set hive.llap.cache.allow.synthetic.fileid=true;
 
-CREATE TABLE parquet_types_staging (
+CREATE TABLE parquet_types_staging_n1 (
   cint int,
   ctinyint tinyint,
   csmallint smallint,
@@ -29,7 +29,7 @@ FIELDS TERMINATED BY '|'
 COLLECTION ITEMS TERMINATED BY ','
 MAP KEYS TERMINATED BY ':';
 
-CREATE TABLE parquet_types (
+CREATE TABLE parquet_types_n0 (
   cint int,
   ctinyint tinyint,
   csmallint smallint,
@@ -46,13 +46,13 @@ CREATE TABLE parquet_types (
   d date
 ) STORED AS PARQUET;
 
-LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging;
+LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging_n1;
 
-SELECT * FROM parquet_types_staging;
+SELECT * FROM parquet_types_staging_n1;
 
-INSERT OVERWRITE TABLE parquet_types
+INSERT OVERWRITE TABLE parquet_types_n0
 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
-unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging;
+unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging_n1;
 
 -- test types in group by
 
@@ -62,7 +62,7 @@ EXPLAIN SELECT ctinyint,
   COUNT(cstring1),
   ROUND(AVG(cfloat), 5),
   ROUND(STDDEV_POP(cdouble),5)
-FROM parquet_types
+FROM parquet_types_n0
 GROUP BY ctinyint
 ORDER BY ctinyint
 ;
@@ -73,25 +73,25 @@ SELECT ctinyint,
   COUNT(cstring1),
   ROUND(AVG(cfloat), 5),
   ROUND(STDDEV_POP(cdouble),5)
-FROM parquet_types
+FROM parquet_types_n0
 GROUP BY ctinyint
 ORDER BY ctinyint
 ;
 
-EXPLAIN SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat;
-SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat;
+EXPLAIN SELECT cfloat, count(*) FROM parquet_types_n0 GROUP BY cfloat ORDER BY cfloat;
+SELECT cfloat, count(*) FROM parquet_types_n0 GROUP BY cfloat ORDER BY cfloat;
 
-EXPLAIN SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar;
-SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar;
+EXPLAIN SELECT cchar, count(*) FROM parquet_types_n0 GROUP BY cchar ORDER BY cchar;
+SELECT cchar, count(*) FROM parquet_types_n0 GROUP BY cchar ORDER BY cchar;
 
-EXPLAIN SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar;
-SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar;
+EXPLAIN SELECT cvarchar, count(*) FROM parquet_types_n0 GROUP BY cvarchar ORDER BY cvarchar;
+SELECT cvarchar, count(*) FROM parquet_types_n0 GROUP BY cvarchar ORDER BY cvarchar;
 
-EXPLAIN SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1;
-SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1;
+EXPLAIN SELECT cstring1, count(*) FROM parquet_types_n0 GROUP BY cstring1 ORDER BY cstring1;
+SELECT cstring1, count(*) FROM parquet_types_n0 GROUP BY cstring1 ORDER BY cstring1;
 
-EXPLAIN SELECT t, count(*) FROM parquet_types GROUP BY t ORDER BY t;
-SELECT t, count(*) FROM parquet_types GROUP BY t ORDER BY t;
+EXPLAIN SELECT t, count(*) FROM parquet_types_n0 GROUP BY t ORDER BY t;
+SELECT t, count(*) FROM parquet_types_n0 GROUP BY t ORDER BY t;
 
-EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary;
-SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary;
\ No newline at end of file
+EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types_n0 GROUP BY cbinary;
+SELECT hex(cbinary), count(*) FROM parquet_types_n0 GROUP BY cbinary;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/parquet_vectorization_part_project.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_vectorization_part_project.q b/ql/src/test/queries/clientpositive/parquet_vectorization_part_project.q
index d5b2e3c..c36cfcb 100644
--- a/ql/src/test/queries/clientpositive/parquet_vectorization_part_project.q
+++ b/ql/src/test/queries/clientpositive/parquet_vectorization_part_project.q
@@ -4,9 +4,9 @@ set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=none;
 
-CREATE TABLE alltypesparquet_part(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS PARQUET;
-insert overwrite table alltypesparquet_part partition (ds='2011') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100;
-insert overwrite table alltypesparquet_part partition (ds='2012') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100;
+CREATE TABLE alltypesparquet_part_n0(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS PARQUET;
+insert overwrite table alltypesparquet_part_n0 partition (ds='2011') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100;
+insert overwrite table alltypesparquet_part_n0 partition (ds='2012') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100;
 
-explain vectorization select (cdouble+2) c1 from alltypesparquet_part order by c1 limit 10;
-select (cdouble+2) c1 from alltypesparquet_part order by c1 limit 10;
+explain vectorization select (cdouble+2) c1 from alltypesparquet_part_n0 order by c1 limit 10;
+select (cdouble+2) c1 from alltypesparquet_part_n0 order by c1 limit 10;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partInit.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partInit.q b/ql/src/test/queries/clientpositive/partInit.q
index 62299b2..e2703eb 100644
--- a/ql/src/test/queries/clientpositive/partInit.q
+++ b/ql/src/test/queries/clientpositive/partInit.q
@@ -1,13 +1,13 @@
 set hive.mapred.mode=nonstrict;
-CREATE TABLE empty (c INT) PARTITIONED BY (p INT);
-SELECT MAX(c) FROM empty;
-SELECT MAX(p) FROM empty;
+CREATE TABLE empty_n1 (c INT) PARTITIONED BY (p INT);
+SELECT MAX(c) FROM empty_n1;
+SELECT MAX(p) FROM empty_n1;
 
-ALTER TABLE empty ADD PARTITION (p=1);
+ALTER TABLE empty_n1 ADD PARTITION (p=1);
 
 set hive.optimize.metadataonly=true;
-SELECT MAX(p) FROM empty;
+SELECT MAX(p) FROM empty_n1;
 
 set hive.optimize.metadataonly=false;
-SELECT MAX(p) FROM empty;
+SELECT MAX(p) FROM empty_n1;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/part_inherit_tbl_props.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/part_inherit_tbl_props.q b/ql/src/test/queries/clientpositive/part_inherit_tbl_props.q
index 3ee1b4a..907fa02 100644
--- a/ql/src/test/queries/clientpositive/part_inherit_tbl_props.q
+++ b/ql/src/test/queries/clientpositive/part_inherit_tbl_props.q
@@ -1,8 +1,8 @@
 set hive.metastore.partition.inherit.table.properties=a,b;
 -- The property needs to be unset at the end of the test till HIVE-3109/HIVE-3112 is fixed
 
-create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval');
-alter table mytbl add partition (c2 = 'v1');
-describe formatted mytbl partition (c2='v1');
+create table mytbl_n0 (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval');
+alter table mytbl_n0 add partition (c2 = 'v1');
+describe formatted mytbl_n0 partition (c2='v1');
 
 set hive.metastore.partition.inherit.table.properties=;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/part_inherit_tbl_props_empty.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/part_inherit_tbl_props_empty.q b/ql/src/test/queries/clientpositive/part_inherit_tbl_props_empty.q
index f3f0335..d54c7e6 100644
--- a/ql/src/test/queries/clientpositive/part_inherit_tbl_props_empty.q
+++ b/ql/src/test/queries/clientpositive/part_inherit_tbl_props_empty.q
@@ -1,4 +1,4 @@
 set hive.metastore.partition.inherit.table.properties="";
-create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval');
-alter table mytbl add partition (c2 = 'v1');
-describe formatted mytbl partition (c2='v1');
+create table mytbl_n2 (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval');
+alter table mytbl_n2 add partition (c2 = 'v1');
+describe formatted mytbl_n2 partition (c2='v1');

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partcols1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partcols1.q b/ql/src/test/queries/clientpositive/partcols1.q
index 7f47005..4be9576 100644
--- a/ql/src/test/queries/clientpositive/partcols1.q
+++ b/ql/src/test/queries/clientpositive/partcols1.q
@@ -1,11 +1,11 @@
 --! qt:dataset:src
 
-create table test1(col1 string) partitioned by (partitionId int);
-insert overwrite table test1 partition (partitionId=1)
+create table test1_n15(col1 string) partitioned by (partitionId int);
+insert overwrite table test1_n15 partition (partitionId=1)
   select key from src tablesample (10 rows);
 
  FROM (
- FROM test1
+ FROM test1_n15
  SELECT partitionId, 111 as col2, 222 as col3, 333 as col4
  WHERE partitionId = 1
  DISTRIBUTE BY partitionId
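Note: the hunk above uses Hive's FROM-first syntax, which names the source
once and lets one or more SELECT/INSERT clauses read from it. A minimal
multi-insert sketch in the same style, with dest1/dest2 as hypothetical
targets:

    from src
    insert overwrite table dest1 select key where key < 100
    insert overwrite table dest2 select value where key >= 100;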

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partial_column_stats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partial_column_stats.q b/ql/src/test/queries/clientpositive/partial_column_stats.q
index 8ff65ac..f66525a 100644
--- a/ql/src/test/queries/clientpositive/partial_column_stats.q
+++ b/ql/src/test/queries/clientpositive/partial_column_stats.q
@@ -1,9 +1,9 @@
 set hive.mapred.mode=nonstrict;
 
-create table t1 (key int, data struct<name:string, id: string>, value string);
+create table t1_n53 (key int, data struct<name:string, id: string>, value string);
 
-explain analyze table t1 compute statistics for columns;
+explain analyze table t1_n53 compute statistics for columns;
 
-analyze table t1 compute statistics for columns;
+analyze table t1_n53 compute statistics for columns;
 
-desc formatted t1 value;
+desc formatted t1_n53 value;
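Note: 'desc formatted <table> <column>' prints the column-level statistics
(min/max, null count, distinct count, and so on) gathered by 'analyze table
... compute statistics for columns'; this test covers the case where stats
exist for only part of the schema. A minimal sketch with a hypothetical table:

    create table stats_demo (key int, value string);
    analyze table stats_demo compute statistics for columns;
    -- shows num_nulls, distinct_count, avg/max col len for 'value'
    desc formatted stats_demo value;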

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partition_condition_remover.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_condition_remover.q b/ql/src/test/queries/clientpositive/partition_condition_remover.q
index f417eb7..95fb09f 100644
--- a/ql/src/test/queries/clientpositive/partition_condition_remover.q
+++ b/ql/src/test/queries/clientpositive/partition_condition_remover.q
@@ -1,14 +1,14 @@
 --! qt:dataset:alltypesorc
 
-drop table foo;
+drop table foo_n5;
 
-create table foo (i int) partitioned by (s string);
+create table foo_n5 (i int) partitioned by (s string);
 
-insert overwrite table foo partition(s='foo') select cint from alltypesorc limit 10;
-insert overwrite table foo partition(s='bar') select cint from alltypesorc limit 10;
+insert overwrite table foo_n5 partition(s='foo_n5') select cint from alltypesorc limit 10;
+insert overwrite table foo_n5 partition(s='bar') select cint from alltypesorc limit 10;
 
-explain select * from foo where s not in ('bar');
-select * from foo where s not in ('bar');
+explain select * from foo_n5 where s not in ('bar');
+select * from foo_n5 where s not in ('bar');
 
 
-drop table foo;
\ No newline at end of file
+drop table foo_n5;
\ No newline at end of file
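Note: this test drives the partition condition remover: once partition
pruning has restricted the scan to the partitions satisfying
"s not in ('bar')", the predicate is redundant and the optimizer can drop the
Filter operator, which is what the explain output is expected to show. A
minimal sketch with a hypothetical table:

    create table pcr_demo (i int) partitioned by (s string);
    -- after pruning, no residual filter on s should remain in the plan
    explain select * from pcr_demo where s not in ('bar');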

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partition_decode_name.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_decode_name.q b/ql/src/test/queries/clientpositive/partition_decode_name.q
index 7d851d9..3d72cbe 100644
--- a/ql/src/test/queries/clientpositive/partition_decode_name.q
+++ b/ql/src/test/queries/clientpositive/partition_decode_name.q
@@ -1,22 +1,22 @@
 --! qt:dataset:src
-create table sc as select * 
+create table sc_n0 as select * 
 from (select '2011-01-11', '2011-01-11+14:18:26' from src tablesample (1 rows)
       union all 
       select '2011-01-11', '2011-01-11+15:18:26' from src tablesample (1 rows)
       union all 
      select '2011-01-11', '2011-01-11+16:18:26' from src tablesample (1 rows) ) s;
 
-create table sc_part (key string) partitioned by (ts string) stored as rcfile;
+create table sc_part_n0 (key string) partitioned by (ts string) stored as rcfile;
 
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 set hive.decode.partition.name=false;
-insert overwrite table sc_part partition(ts) select * from sc;
-show partitions sc_part;
-select count(*) from sc_part where ts is not null;
+insert overwrite table sc_part_n0 partition(ts) select * from sc_n0;
+show partitions sc_part_n0;
+select count(*) from sc_part_n0 where ts is not null;
 
 set hive.decode.partition.name=true;
-insert overwrite table sc_part partition(ts) select * from sc;
-show partitions sc_part;
-select count(*) from sc_part where ts is not null;
+insert overwrite table sc_part_n0 partition(ts) select * from sc_n0;
+show partitions sc_part_n0;
+select count(*) from sc_part_n0 where ts is not null;
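Note: partition values containing characters that are not legal in HDFS path
names (such as the ':' in '2011-01-11+14:18:26') are percent-escaped when
used as directory names, and hive.decode.partition.name controls whether
partition names are displayed escaped or decoded. A sketch of the expected
difference (the escaped form shown is an assumption):

    set hive.decode.partition.name=false;
    show partitions sc_part_n0;   -- e.g. ts=2011-01-11+14%3A18%3A26
    set hive.decode.partition.name=true;
    show partitions sc_part_n0;   -- e.g. ts=2011-01-11+14:18:26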

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partition_shared_scan.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_shared_scan.q b/ql/src/test/queries/clientpositive/partition_shared_scan.q
index ccb8e74..55aff6a 100644
--- a/ql/src/test/queries/clientpositive/partition_shared_scan.q
+++ b/ql/src/test/queries/clientpositive/partition_shared_scan.q
@@ -2,24 +2,24 @@
 --! qt:dataset:alltypesorc
 set hive.merge.nway.joins=false;
 
-drop table foo;
+drop table foo_n1;
 
-create table foo (i int) partitioned by (s string);
-insert overwrite table foo partition(s='foo') select cint from alltypesorc limit 10;
-insert overwrite table foo partition(s='bar') select cint from alltypesorc limit 10;
+create table foo_n1 (i int) partitioned by (s string);
+insert overwrite table foo_n1 partition(s='foo_n1') select cint from alltypesorc limit 10;
+insert overwrite table foo_n1 partition(s='bar') select cint from alltypesorc limit 10;
 
 explain
 select *
-from foo f1
+from foo_n1 f1
 join part p1 on (p1.p_partkey = f1.i)
-join foo f2 on (f1.i = f2.i)
-where f1.s='foo' and f2.s='bar';
+join foo_n1 f2 on (f1.i = f2.i)
+where f1.s='foo_n1' and f2.s='bar';
 
 explain
 select *
-from foo f1
+from foo_n1 f1
 join part p1 on (p1.p_partkey = f1.i)
-join foo f2 on (f1.i = f2.i)
-where f1.s='foo' and f2.s='foo';
+join foo_n1 f2 on (f1.i = f2.i)
+where f1.s='foo_n1' and f2.s='foo_n1';
 
-drop table foo;
+drop table foo_n1;
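Note: with hive.merge.nway.joins=false, the two joins stay separate operators
instead of being folded into one n-way join, and the two explain plans then
show whether the scans of foo_n1 (different partition filters in the first
query, identical ones in the second) can be shared. A reduced sketch of the
same shape:

    set hive.merge.nway.joins=false;
    explain
    select * from foo_n1 f1 join foo_n1 f2 on (f1.i = f2.i)
    where f1.s='foo_n1' and f2.s='bar';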

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partition_type_check.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_type_check.q b/ql/src/test/queries/clientpositive/partition_type_check.q
index 081d0a3..fcb5caa 100644
--- a/ql/src/test/queries/clientpositive/partition_type_check.q
+++ b/ql/src/test/queries/clientpositive/partition_type_check.q
@@ -3,24 +3,24 @@ set hive.mapred.mode=nonstrict;
 set hive.typecheck.on.insert = true;
 
 -- begin part(string, string) pass(string, int)
-CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile;
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day=2);
+CREATE TABLE tab1_n3 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1_n3 PARTITION(month='June', day=2);
 
-select * from tab1;
-drop table tab1;
+select * from tab1_n3;
+drop table tab1_n3;
 
 -- begin part(string, int) pass(string, string)
-CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile;
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='2');
+CREATE TABLE tab1_n3 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1_n3 PARTITION(month='June', day='2');
 
-select * from tab1;
-drop table tab1;
+select * from tab1_n3;
+drop table tab1_n3;
 
 -- begin part(string, date) pass(string, date)
-create table tab1 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile;
-alter table tab1 add partition (month='June', day='2008-01-01');
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='2008-01-01');
+create table tab1_n3 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile;
+alter table tab1_n3 add partition (month='June', day='2008-01-01');
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1_n3 PARTITION(month='June', day='2008-01-01');
 
-select id1, id2, day from tab1 where day='2008-01-01';
-drop table tab1;
+select id1, id2, day from tab1_n3 where day='2008-01-01';
+drop table tab1_n3;
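Note: with hive.typecheck.on.insert=true, Hive validates and converts
partition-spec values against the declared partition column type, so day=2
against a string column or day='2' against an int column still lands in a
correctly named partition, while a date column requires a valid date literal.
A minimal sketch with a hypothetical table:

    set hive.typecheck.on.insert=true;
    create table tc_demo (id int) partitioned by (day int);
    alter table tc_demo add partition (day='2');  -- string checked/converted
    show partitions tc_demo;                      -- expected: day=2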
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partition_wise_fileformat.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat.q
index 3b547c3..1796a00 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat.q
@@ -2,33 +2,33 @@
 set hive.mapred.mode=nonstrict;
 
 
-create table partition_test_partitioned(key string, value string) partitioned by (dt string);
+create table partition_test_partitioned_n1(key string, value string) partitioned by (dt string);
 
-insert overwrite table partition_test_partitioned partition(dt=100) select * from src1;
-show table extended like partition_test_partitioned;
-show table extended like partition_test_partitioned partition(dt=100);
-select key from partition_test_partitioned where dt=100;
-select key from partition_test_partitioned;
+insert overwrite table partition_test_partitioned_n1 partition(dt=100) select * from src1;
+show table extended like partition_test_partitioned_n1;
+show table extended like partition_test_partitioned_n1 partition(dt=100);
+select key from partition_test_partitioned_n1 where dt=100;
+select key from partition_test_partitioned_n1;
 
-alter table partition_test_partitioned set fileformat rcfile;
-insert overwrite table partition_test_partitioned partition(dt=101) select * from src1;
-show table extended like partition_test_partitioned;
-show table extended like partition_test_partitioned partition(dt=100);
-show table extended like partition_test_partitioned partition(dt=101);
-select key from partition_test_partitioned where dt=100;
-select key from partition_test_partitioned where dt=101;
-select key from partition_test_partitioned;
+alter table partition_test_partitioned_n1 set fileformat rcfile;
+insert overwrite table partition_test_partitioned_n1 partition(dt=101) select * from src1;
+show table extended like partition_test_partitioned_n1;
+show table extended like partition_test_partitioned_n1 partition(dt=100);
+show table extended like partition_test_partitioned_n1 partition(dt=101);
+select key from partition_test_partitioned_n1 where dt=100;
+select key from partition_test_partitioned_n1 where dt=101;
+select key from partition_test_partitioned_n1;
 
-alter table partition_test_partitioned set fileformat Sequencefile;
-insert overwrite table partition_test_partitioned partition(dt=102) select * from src1;
-show table extended like partition_test_partitioned;
-show table extended like partition_test_partitioned partition(dt=100);
-show table extended like partition_test_partitioned partition(dt=101);
-show table extended like partition_test_partitioned partition(dt=102);
-select key from partition_test_partitioned where dt=100;
-select key from partition_test_partitioned where dt=101;
-select key from partition_test_partitioned where dt=102;
-select key from partition_test_partitioned;
+alter table partition_test_partitioned_n1 set fileformat Sequencefile;
+insert overwrite table partition_test_partitioned_n1 partition(dt=102) select * from src1;
+show table extended like partition_test_partitioned_n1;
+show table extended like partition_test_partitioned_n1 partition(dt=100);
+show table extended like partition_test_partitioned_n1 partition(dt=101);
+show table extended like partition_test_partitioned_n1 partition(dt=102);
+select key from partition_test_partitioned_n1 where dt=100;
+select key from partition_test_partitioned_n1 where dt=101;
+select key from partition_test_partitioned_n1 where dt=102;
+select key from partition_test_partitioned_n1;
 
-select key from partition_test_partitioned where dt >=100 and dt <= 102;
+select key from partition_test_partitioned_n1 where dt >=100 and dt <= 102;
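Note: 'alter table ... set fileformat' changes only the default used for new
partitions; each existing partition keeps the format it was written with, and
Hive picks the matching reader per partition, so the text, RCFile, and
SequenceFile partitions above coexist in one table. A reduced sketch with a
hypothetical table:

    create table ff_demo (key string) partitioned by (dt int);
    insert overwrite table ff_demo partition (dt=100) select key from src1;
    alter table ff_demo set fileformat rcfile;
    insert overwrite table ff_demo partition (dt=101) select key from src1;
    -- dt=100 is still read as text, dt=101 as RCFile
    select key from ff_demo where dt >= 100 and dt <= 101;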
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
index 2ff680e..2394acb 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
@@ -2,19 +2,19 @@
 set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 
 -- This tests that the schema can be changed for binary serde data
-create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile;
-alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238;
+create table partition_test_partitioned_n4(key string, value string) partitioned by (dt string) stored as rcfile;
+alter table partition_test_partitioned_n4 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+insert overwrite table partition_test_partitioned_n4 partition(dt='1') select * from src where key = 238;
 
-select * from partition_test_partitioned where dt is not null;
-select key+key, value from partition_test_partitioned where dt is not null;
+select * from partition_test_partitioned_n4 where dt is not null;
+select key+key, value from partition_test_partitioned_n4 where dt is not null;
 set hive.metastore.disallow.incompatible.col.type.changes=false;
-alter table partition_test_partitioned change key key int;
+alter table partition_test_partitioned_n4 change key key int;
 reset hive.metastore.disallow.incompatible.col.type.changes;
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n4 where dt is not null;
+select * from partition_test_partitioned_n4 where dt is not null;
 
-alter table partition_test_partitioned add columns (value2 string);
+alter table partition_test_partitioned_n4 add columns (value2 string);
 
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n4 where dt is not null;
+select * from partition_test_partitioned_n4 where dt is not null;
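Note: the metastore by default rejects column type changes it considers
incompatible, so the test turns the guard off just long enough to change
'key' from string to int, resets it, and then verifies LazyBinaryColumnarSerDe
can still read the partition written under the old schema. The relevant
toggle, with a hypothetical table:

    set hive.metastore.disallow.incompatible.col.type.changes=false;
    alter table se_demo change key key int;  -- rejected if the guard is on
    reset hive.metastore.disallow.incompatible.col.type.changes;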

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
index 0eb6d18..c9379f4 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
@@ -2,26 +2,26 @@
 set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 
 -- This tests that the schema can be changed for binary serde data
-create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile;
-alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238;
+create table partition_test_partitioned_n9(key string, value string) partitioned by (dt string) stored as rcfile;
+alter table partition_test_partitioned_n9 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+insert overwrite table partition_test_partitioned_n9 partition(dt='1') select * from src where key = 238;
 
-select * from partition_test_partitioned where dt is not null;
-select key+key, value from partition_test_partitioned where dt is not null;
+select * from partition_test_partitioned_n9 where dt is not null;
+select key+key, value from partition_test_partitioned_n9 where dt is not null;
 set hive.metastore.disallow.incompatible.col.type.changes=false;
-alter table partition_test_partitioned change key key int;
+alter table partition_test_partitioned_n9 change key key int;
 reset hive.metastore.disallow.incompatible.col.type.changes;
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n9 where dt is not null;
+select * from partition_test_partitioned_n9 where dt is not null;
 
-insert overwrite table partition_test_partitioned partition(dt='2') select * from src where key = 97;
+insert overwrite table partition_test_partitioned_n9 partition(dt='2') select * from src where key = 97;
 
-alter table partition_test_partitioned add columns (value2 string);
+alter table partition_test_partitioned_n9 add columns (value2 string);
 
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n9 where dt is not null;
+select * from partition_test_partitioned_n9 where dt is not null;
 
-insert overwrite table partition_test_partitioned partition(dt='3') select key, value, value from src where key = 200;
+insert overwrite table partition_test_partitioned_n9 partition(dt='3') select key, value, value from src where key = 200;
 
-select key+key, value, value2 from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value, value2 from partition_test_partitioned_n9 where dt is not null;
+select * from partition_test_partitioned_n9 where dt is not null;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
index f495d72..0d8cfbb 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
@@ -3,19 +3,19 @@ set hive.mapred.mode=nonstrict;
 set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 
 -- This tests that the schema can be changed for partitioned tables for binary serde data for joins
-create table T1(key string, value string) partitioned by (dt string) stored as rcfile;
-alter table T1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-insert overwrite table T1 partition (dt='1') select * from src where key = 238 or key = 97;
+create table T1_n16(key string, value string) partitioned by (dt string) stored as rcfile;
+alter table T1_n16 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+insert overwrite table T1_n16 partition (dt='1') select * from src where key = 238 or key = 97;
 set hive.metastore.disallow.incompatible.col.type.changes=false;
-alter table T1 change key key int;
+alter table T1_n16 change key key int;
 
-insert overwrite table T1 partition (dt='2') select * from src where key = 238 or key = 97;
+insert overwrite table T1_n16 partition (dt='2') select * from src where key = 238 or key = 97;
 
-alter table T1 change key key string;
+alter table T1_n16 change key key string;
 
-create table T2(key string, value string) partitioned by (dt string) stored as rcfile;
-insert overwrite table T2 partition (dt='1') select * from src where key = 238 or key = 97;
+create table T2_n10(key string, value string) partitioned by (dt string) stored as rcfile;
+insert overwrite table T2_n10 partition (dt='1') select * from src where key = 238 or key = 97;
 
-select /* + MAPJOIN(a) */ count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
-select count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
+select /* + MAPJOIN(a) */ count(*) FROM T1_n16 a JOIN T2_n10 b ON a.key = b.key;
+select count(*) FROM T1_n16 a JOIN T2_n10 b ON a.key = b.key;
 reset hive.metastore.disallow.incompatible.col.type.changes;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
index c27e45b..8087983 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
@@ -2,56 +2,56 @@
 set hive.mapred.mode=nonstrict;
 set hive.exec.reducers.max = 1;
 
-CREATE TABLE tbl1(key int, value string) PARTITIONED by (ds string)
+CREATE TABLE tbl1_n8(key int, value string) PARTITIONED by (ds string)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile;
-CREATE TABLE tbl2(key int, value string) PARTITIONED by (ds string)
+CREATE TABLE tbl2_n7(key int, value string) PARTITIONED by (ds string)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile;
 
-alter table tbl1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-alter table tbl2 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+alter table tbl1_n8 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+alter table tbl2_n7 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
 
-insert overwrite table tbl1 partition (ds='1') select * from src where key < 10;
-insert overwrite table tbl2 partition (ds='1') select * from src where key < 10;
+insert overwrite table tbl1_n8 partition (ds='1') select * from src where key < 10;
+insert overwrite table tbl2_n7 partition (ds='1') select * from src where key < 10;
 
-alter table tbl1 change key key int;
-insert overwrite table tbl1 partition (ds='2') select * from src where key < 10;
+alter table tbl1_n8 change key key int;
+insert overwrite table tbl1_n8 partition (ds='2') select * from src where key < 10;
 
-alter table tbl1 change key key string;
+alter table tbl1_n8 change key key string;
 
--- The subquery itself is being map-joined. Multiple partitions of tbl1 with different schemas are being read for tbl2
+-- The subquery itself is being map-joined. Multiple partitions of tbl1_n8 with different schemas are being read for tbl2_n7
 select /*+mapjoin(subq1)*/ count(*) from 
-  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+  (select a.key as key, a.value as value from tbl1_n8 a where key < 6) subq1 
     join
-  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  (select a.key as key, a.value as value from tbl2_n7 a where key < 6) subq2
   on subq1.key = subq2.key;
 
 set hive.optimize.bucketmapjoin = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 
 -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should 
--- be converted to a bucketized mapside join. Multiple partitions of tbl1 with different schemas are being read for each
--- bucket of tbl2
+-- be converted to a bucketized mapside join. Multiple partitions of tbl1_n8 with different schemas are being read for each
+-- bucket of tbl2_n7
 select /*+mapjoin(subq1)*/ count(*) from 
-  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+  (select a.key as key, a.value as value from tbl1_n8 a where key < 6) subq1 
     join
-  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  (select a.key as key, a.value as value from tbl2_n7 a where key < 6) subq2
   on subq1.key = subq2.key;
 
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 
 -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should 
--- be converted to a sort-merge join. Multiple partitions of tbl1 with different schemas are being read for a
--- given file of tbl2
+-- be converted to a sort-merge join. Multiple partitions of tbl1_n8 with different schemas are being read for a
+-- given file of tbl2_n7
 select /*+mapjoin(subq1)*/ count(*) from 
-  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+  (select a.key as key, a.value as value from tbl1_n8 a where key < 6) subq1 
     join
-  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  (select a.key as key, a.value as value from tbl2_n7 a where key < 6) subq2
   on subq1.key = subq2.key;
 
 -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side
--- join should be performed.  Multiple partitions of tbl1 with different schemas are being read for tbl2
+-- join should be performed.  Multiple partitions of tbl1_n8 with different schemas are being read for tbl2_n7
 select /*+mapjoin(subq1)*/ count(*) from 
-  (select a.key+1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
+  (select a.key+1 as key, concat(a.value, a.value) as value from tbl1_n8 a) subq1 
     join
-  (select a.key+1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+  (select a.key+1 as key, concat(a.value, a.value) as value from tbl2_n7 a) subq2
   on subq1.key = subq2.key;
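Note: the settings stepped through above escalate the join strategy: a plain
map join of the subqueries, then hive.optimize.bucketmapjoin=true with
BucketizedHiveInputFormat for a bucketed map-side join, then the sortedmerge
flag for a sort-merge join; the last query shows that rewriting the join key
(key+1) in the subquery disqualifies both conversions. A reduced sketch of
the convertible case, with s1/s2 as hypothetical aliases:

    set hive.optimize.bucketmapjoin=true;
    set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
    set hive.optimize.bucketmapjoin.sortedmerge=true;
    -- keys pass through untouched, so the conversion can apply
    select /*+ mapjoin(s1) */ count(*)
    from (select key, value from tbl1_n8 where key < 6) s1
    join (select key, value from tbl2_n7 where key < 6) s2
    on s1.key = s2.key;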

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
index ad2f068..a652ca3 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
@@ -2,26 +2,26 @@
 set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 
 -- This tests that the schema can be changed for binary serde data
-create table partition_test_partitioned(key string, value string)
+create table partition_test_partitioned_n6(key string, value string)
 partitioned by (dt string) stored as rcfile;
-insert overwrite table partition_test_partitioned partition(dt='1')
+insert overwrite table partition_test_partitioned_n6 partition(dt='1')
 select * from src where key = 238;
 
-select * from partition_test_partitioned where dt is not null;
-select key+key, value from partition_test_partitioned where dt is not null;
+select * from partition_test_partitioned_n6 where dt is not null;
+select key+key, value from partition_test_partitioned_n6 where dt is not null;
 set hive.metastore.disallow.incompatible.col.type.changes=false;
-alter table partition_test_partitioned change key key int;
+alter table partition_test_partitioned_n6 change key key int;
 reset hive.metastore.disallow.incompatible.col.type.changes;
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n6 where dt is not null;
+select * from partition_test_partitioned_n6 where dt is not null;
 
-alter table partition_test_partitioned add columns (value2 string);
+alter table partition_test_partitioned_n6 add columns (value2 string);
 
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n6 where dt is not null;
+select * from partition_test_partitioned_n6 where dt is not null;
 
-insert overwrite table partition_test_partitioned partition(dt='2')
+insert overwrite table partition_test_partitioned_n6 partition(dt='2')
 select key, value, value from src where key = 86;
 
-select key+key, value, value2, dt from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value, value2, dt from partition_test_partitioned_n6 where dt is not null;
+select * from partition_test_partitioned_n6 where dt is not null;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
index a97619f..703b214 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
@@ -2,26 +2,26 @@
 set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 
 -- This tests that the schema can be changed for binary serde data
-create table partition_test_partitioned(key string, value string)
+create table partition_test_partitioned_n10(key string, value string)
 partitioned by (dt string) stored as textfile;
-insert overwrite table partition_test_partitioned partition(dt='1')
+insert overwrite table partition_test_partitioned_n10 partition(dt='1')
 select * from src where key = 238;
 
-select * from partition_test_partitioned where dt is not null;
-select key+key, value from partition_test_partitioned where dt is not null;
+select * from partition_test_partitioned_n10 where dt is not null;
+select key+key, value from partition_test_partitioned_n10 where dt is not null;
 set hive.metastore.disallow.incompatible.col.type.changes=false;
-alter table partition_test_partitioned change key key int;
+alter table partition_test_partitioned_n10 change key key int;
 reset hive.metastore.disallow.incompatible.col.type.changes;
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n10 where dt is not null;
+select * from partition_test_partitioned_n10 where dt is not null;
 
-alter table partition_test_partitioned add columns (value2 string);
+alter table partition_test_partitioned_n10 add columns (value2 string);
 
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n10 where dt is not null;
+select * from partition_test_partitioned_n10 where dt is not null;
 
-insert overwrite table partition_test_partitioned partition(dt='2')
+insert overwrite table partition_test_partitioned_n10 partition(dt='2')
 select key, value, value from src where key = 86;
 
-select key+key, value, value2, dt from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value, value2, dt from partition_test_partitioned_n10 where dt is not null;
+select * from partition_test_partitioned_n10 where dt is not null;
