http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/input_testxpath.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input_testxpath.q b/ql/src/test/queries/clientpositive/input_testxpath.q
old mode 100755
new mode 100644
index 368feb6..ddbbed4
--- a/ql/src/test/queries/clientpositive/input_testxpath.q
+++ b/ql/src/test/queries/clientpositive/input_testxpath.q
@@ -1,11 +1,11 @@
 --! qt:dataset:src_thrift
-CREATE TABLE dest1(key INT, value STRING, mapvalue STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n6(key INT, value STRING, mapvalue STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2'];
+INSERT OVERWRITE TABLE dest1_n6 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2'];
 
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2'];
+INSERT OVERWRITE TABLE dest1_n6 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2'];
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n6.* FROM dest1_n6;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/input_testxpath2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input_testxpath2.q b/ql/src/test/queries/clientpositive/input_testxpath2.q
index 7c7b1fe..bdb1a8c 100644
--- a/ql/src/test/queries/clientpositive/input_testxpath2.q
+++ b/ql/src/test/queries/clientpositive/input_testxpath2.q
@@ -1,11 +1,11 @@
 --! qt:dataset:src_thrift
-CREATE TABLE dest1(lint_size INT, lintstring_size INT, mstringstring_size INT) STORED AS TEXTFILE;
+CREATE TABLE dest1_n32(lint_size INT, lintstring_size INT, mstringstring_size INT) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL);
+INSERT OVERWRITE TABLE dest1_n32 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL);
 
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL);
+INSERT OVERWRITE TABLE dest1_n32 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL);
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n32.* FROM dest1_n32;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/inputddl7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/inputddl7.q b/ql/src/test/queries/clientpositive/inputddl7.q
index 27e587a..6f775c1 100644
--- a/ql/src/test/queries/clientpositive/inputddl7.q
+++ b/ql/src/test/queries/clientpositive/inputddl7.q
@@ -2,29 +2,29 @@
 -- test for loading into partitions with the correct file format
 
 
-CREATE TABLE T1(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1;
-SELECT COUNT(1) FROM T1;
+CREATE TABLE T1_n117(name STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n117;
+SELECT COUNT(1) FROM T1_n117;
 
 
-CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T2;
-SELECT COUNT(1) FROM T2;
+CREATE TABLE T2_n69(name STRING) STORED AS SEQUENCEFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T2_n69;
+SELECT COUNT(1) FROM T2_n69;
 
 
-CREATE TABLE T3(name STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3 PARTITION (ds='2008-04-09');
-SELECT COUNT(1) FROM T3 where T3.ds='2008-04-09';
+CREATE TABLE T3_n25(name STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3_n25 PARTITION (ds='2008-04-09');
+SELECT COUNT(1) FROM T3_n25 where T3_n25.ds='2008-04-09';
 
 
-CREATE TABLE T4(name STRING) PARTITIONED BY(ds STRING) STORED AS SEQUENCEFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T4 PARTITION (ds='2008-04-09');
-SELECT COUNT(1) FROM T4 where T4.ds='2008-04-09';
+CREATE TABLE T4_n14(name STRING) PARTITIONED BY(ds STRING) STORED AS SEQUENCEFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T4_n14 PARTITION (ds='2008-04-09');
+SELECT COUNT(1) FROM T4_n14 where T4_n14.ds='2008-04-09';
 
-DESCRIBE EXTENDED T1;
-DESCRIBE EXTENDED T2;
-DESCRIBE EXTENDED T3 PARTITION (ds='2008-04-09');
-DESCRIBE EXTENDED T4 PARTITION (ds='2008-04-09');
+DESCRIBE EXTENDED T1_n117;
+DESCRIBE EXTENDED T2_n69;
+DESCRIBE EXTENDED T3_n25 PARTITION (ds='2008-04-09');
+DESCRIBE EXTENDED T4_n14 PARTITION (ds='2008-04-09');
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/insert0.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert0.q b/ql/src/test/queries/clientpositive/insert0.q
index 85f73a0..fcb33ad 100644
--- a/ql/src/test/queries/clientpositive/insert0.q
+++ b/ql/src/test/queries/clientpositive/insert0.q
@@ -2,19 +2,19 @@
 set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
 
-DROP TABLE insert_into1;
+DROP TABLE insert_into1_n1;
 DROP TABLE ctas_table;
 DROP TABLE ctas_part;
 
-CREATE TABLE insert_into1 (key int, value string);
+CREATE TABLE insert_into1_n1 (key int, value string);
 
-INSERT OVERWRITE TABLE insert_into1 SELECT * from src ORDER BY key LIMIT 10;
+INSERT OVERWRITE TABLE insert_into1_n1 SELECT * from src ORDER BY key LIMIT 10;
 
-select * from insert_into1 order by key;
+select * from insert_into1_n1 order by key;
 
-INSERT INTO TABLE insert_into1 SELECT * from src ORDER BY key DESC LIMIT 10;
+INSERT INTO TABLE insert_into1_n1 SELECT * from src ORDER BY key DESC LIMIT 10;
 
-select * from insert_into1 order by key;
+select * from insert_into1_n1 order by key;
 
 create table ctas_table as SELECT key, count(value) as foo from src GROUP BY key LIMIT 10;
 
@@ -35,6 +35,6 @@ select * from ctas_part order by key;
 
 
 
-DROP TABLE insert_into1;
+DROP TABLE insert_into1_n1;
 DROP TABLE ctas_table;
 DROP TABLE ctas_part;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/insert1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert1.q b/ql/src/test/queries/clientpositive/insert1.q
index 91daf7e..f9accc0 100644
--- a/ql/src/test/queries/clientpositive/insert1.q
+++ b/ql/src/test/queries/clientpositive/insert1.q
@@ -33,7 +33,7 @@ SELECT * FROM result;
 
 USE default;
 CREATE DATABASE db1;
-CREATE TABLE db1.result(col1 STRING);
-INSERT OVERWRITE TABLE db1.result SELECT 'db1_insert1' FROM src LIMIT 1;
-INSERT INTO TABLE db1.result SELECT 'db1_insert2' FROM src LIMIT 1;
-SELECT * FROM db1.result;
+CREATE TABLE db1.result_n0(col1 STRING);
+INSERT OVERWRITE TABLE db1.result_n0 SELECT 'db1_insert1' FROM src LIMIT 1;
+INSERT INTO TABLE db1.result_n0 SELECT 'db1_insert2' FROM src LIMIT 1;
+SELECT * FROM db1.result_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q b/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
index b4058d7..612b227 100644
--- a/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
+++ b/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
@@ -3,8 +3,8 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
 
-create table acid_notbucketed(a int, b varchar(128)) stored as orc;
+create table acid_notbucketed_n0(a int, b varchar(128)) stored as orc;
 
-insert into table acid_notbucketed select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
+insert into table acid_notbucketed_n0 select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
 
-select * from acid_notbucketed;
+select * from acid_notbucketed_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/insert_into_default_keyword.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_into_default_keyword.q b/ql/src/test/queries/clientpositive/insert_into_default_keyword.q
index 2e92e91..ebef1a4 100644
--- a/ql/src/test/queries/clientpositive/insert_into_default_keyword.q
+++ b/ql/src/test/queries/clientpositive/insert_into_default_keyword.q
@@ -2,102 +2,102 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 -- SORT_QUERY_RESULTS
 
-DROP TABLE insert_into1;
+DROP TABLE insert_into1_n0;
 
 -- No default constraint
-CREATE TABLE insert_into1 (key int, value string)
+CREATE TABLE insert_into1_n0 (key int, value string)
     clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
-EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT);
-INSERT INTO TABLE insert_into1 values(default, DEFAULT);
-SELECT * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0 values(default, DEFAULT);
+INSERT INTO TABLE insert_into1_n0 values(default, DEFAULT);
+SELECT * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
 -- should be able to use any case for DEFAULT
-EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt);
-INSERT INTO TABLE insert_into1 values(234, dEfAULt);
-SELECT * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0 values(234, dEfAULt);
+INSERT INTO TABLE insert_into1_n0 values(234, dEfAULt);
+SELECT * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
 -- multi values
-explain insert into insert_into1 values(default, 3),(2,default);
-insert into insert_into1 values(default, 3),(2,default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+explain insert into insert_into1_n0 values(default, 3),(2,default);
+insert into insert_into1_n0 values(default, 3),(2,default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
 --with column schema
-EXPLAIN INSERT INTO TABLE insert_into1(key) values(default);
-INSERT INTO TABLE insert_into1(key) values(default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0(key) values(default);
+INSERT INTO TABLE insert_into1_n0(key) values(default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
-EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default);
-INSERT INTO TABLE insert_into1(key, value) values(2,default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0(key, value) values(2,default);
+INSERT INTO TABLE insert_into1_n0(key, value) values(2,default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
-DROP TABLE insert_into1;
+DROP TABLE insert_into1_n0;
 
 -- with default constraint
-CREATE TABLE insert_into1 (key int DEFAULT 1, value string)
+CREATE TABLE insert_into1_n0 (key int DEFAULT 1, value string)
     clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT);
-INSERT INTO TABLE insert_into1 values(default, DEFAULT);
-SELECT * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0 values(default, DEFAULT);
+INSERT INTO TABLE insert_into1_n0 values(default, DEFAULT);
+SELECT * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
 -- should be able to use any case for DEFAULT
-EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt);
-INSERT INTO TABLE insert_into1 values(234, dEfAULt);
-SELECT * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0 values(234, dEfAULt);
+INSERT INTO TABLE insert_into1_n0 values(234, dEfAULt);
+SELECT * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
 -- multi values
-explain insert into insert_into1 values(default, 3),(2,default);
-insert into insert_into1 values(default, 3),(2,default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+explain insert into insert_into1_n0 values(default, 3),(2,default);
+insert into insert_into1_n0 values(default, 3),(2,default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
 --with column schema
-EXPLAIN INSERT INTO TABLE insert_into1(key) values(default);
-INSERT INTO TABLE insert_into1(key) values(default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0(key) values(default);
+INSERT INTO TABLE insert_into1_n0(key) values(default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
-EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default);
-INSERT INTO TABLE insert_into1(key, value) values(2,default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0(key, value) values(2,default);
+INSERT INTO TABLE insert_into1_n0(key, value) values(2,default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
-EXPLAIN INSERT INTO TABLE insert_into1(value, key) values(2,default);
-INSERT INTO TABLE insert_into1(value, key) values(2,default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0(value, key) values(2,default);
+INSERT INTO TABLE insert_into1_n0(value, key) values(2,default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
-EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default);
-INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
-DROP TABLE insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0(key, value) values(2,default),(DEFAULT, default);
+INSERT INTO TABLE insert_into1_n0(key, value) values(2,default),(DEFAULT, default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
+DROP TABLE insert_into1_n0;
 
 
 -- UPDATE
-CREATE TABLE insert_into1 (key int DEFAULT 1, value string, i int)
+CREATE TABLE insert_into1_n0 (key int DEFAULT 1, value string, i int)
     clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
-INSERT INTO insert_into1 values(2,1, 45);
-EXPLAIN UPDATE insert_into1 set key = DEFAULT where value=1;
-UPDATE insert_into1 set key = DEFAULT where value=1;
-SELECT * from insert_into1;
-TRUNCATE table insert_into1;
+INSERT INTO insert_into1_n0 values(2,1, 45);
+EXPLAIN UPDATE insert_into1_n0 set key = DEFAULT where value=1;
+UPDATE insert_into1_n0 set key = DEFAULT where value=1;
+SELECT * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
-INSERT INTO insert_into1 values(2,1, 45);
-EXPLAIN UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1;
-UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1;
-SELECT * from insert_into1;
-TRUNCATE table insert_into1;
+INSERT INTO insert_into1_n0 values(2,1, 45);
+EXPLAIN UPDATE insert_into1_n0 set key = DEFAULT, value=DEFAULT where value=1;
+UPDATE insert_into1_n0 set key = DEFAULT, value=DEFAULT where value=1;
+SELECT * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
 
-DROP TABLE insert_into1;
+DROP TABLE insert_into1_n0;
 
 -- partitioned table
 CREATE TABLE tpart(i int, j int DEFAULT 1001) partitioned by (ds string);
@@ -120,9 +120,9 @@ set hive.mapred.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-create table nonacid (key int, a1 string, value string) stored as orc;
-insert into nonacid values(1, 'a11', 'val1');
-insert into nonacid values(2, 'a12', 'val2');
+create table nonacid_n1 (key int, a1 string, value string) stored as orc;
+insert into nonacid_n1 values(1, 'a11', 'val1');
+insert into nonacid_n1 values(2, 'a12', 'val2');
 
 create table acidTable(key int NOT NULL enable, a1 string DEFAULT 'a1', value string)
 clustered by (value) into 2 buckets stored as orc
@@ -130,21 +130,21 @@ tblproperties ("transactional"="true");
 insert into acidTable values(1, 'a10','val100');
 
 -- only insert
-explain MERGE INTO acidTable as t using nonacid as s ON t.key = s.key
+explain MERGE INTO acidTable as t using nonacid_n1 as s ON t.key = s.key
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, DEFAULT, DEFAULT);
 
-MERGE INTO acidTable as t using nonacid as s ON t.key = s.key
+MERGE INTO acidTable as t using nonacid_n1 as s ON t.key = s.key
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, DEFAULT, DEFAULT);
 select * from acidTable;
 truncate table acidTable;
 insert into acidTable values(1, 'a10','val100');
 
 -- insert + update + delete
-explain MERGE INTO acidTable as t using nonacid as s ON t.key = s.key
+explain MERGE INTO acidTable as t using nonacid_n1 as s ON t.key = s.key
 WHEN MATCHED AND s.key < 3 THEN DELETE
 WHEN MATCHED AND s.key > 3 THEN UPDATE set a1 = DEFAULT
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, DEFAULT);
-MERGE INTO acidTable as t using nonacid as s ON t.key = s.key
+MERGE INTO acidTable as t using nonacid_n1 as s ON t.key = s.key
 WHEN MATCHED AND s.key < 3 THEN DELETE
 WHEN MATCHED AND s.key > 3 THEN UPDATE set a1 = DEFAULT
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, DEFAULT);
@@ -154,12 +154,12 @@ truncate table acidTable;
 create table acidTable2(key int DEFAULT 404) clustered by (key) into 2 buckets stored as orc
 tblproperties ("transactional"="true");
 
-explain MERGE INTO acidTable2 as t using nonacid as s ON t.key = s.key
+explain MERGE INTO acidTable2 as t using nonacid_n1 as s ON t.key = s.key
 WHEN NOT MATCHED THEN INSERT VALUES (DEFAULT);
-MERGE INTO acidTable2 as t using nonacid as s ON t.key = s.key
+MERGE INTO acidTable2 as t using nonacid_n1 as s ON t.key = s.key
 WHEN NOT MATCHED THEN INSERT VALUES (DEFAULT);
 select * from acidTable2;
 
 DROP TABLE acidTable;
 DROP TABLE acidTable2;
-DROP TABLE nonacid;
\ No newline at end of file
+DROP TABLE nonacid_n1;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/insert_into_with_schema.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_into_with_schema.q b/ql/src/test/queries/clientpositive/insert_into_with_schema.q
index b2d9d06..21f1650 100644
--- a/ql/src/test/queries/clientpositive/insert_into_with_schema.q
+++ b/ql/src/test/queries/clientpositive/insert_into_with_schema.q
@@ -4,40 +4,40 @@ set hive.mapred.mode=nonstrict;
 drop database if exists x314 cascade;
 create database x314;
 use x314;
-create table source(s1 int, s2 int);
+create table source_n0(s1 int, s2 int);
 create table target1(x int, y int, z int);
 create table target2(x int, y int, z int);
 create table target3(x int, y int, z int);
 
-insert into source(s2,s1) values(2,1);
--- expect source to contain 1 row (1,2)
-select * from source;
-insert into target1(z,x) select * from source;
+insert into source_n0(s2,s1) values(2,1);
+-- expect source_n0 to contain 1 row (1,2)
+select * from source_n0;
+insert into target1(z,x) select * from source_n0;
 -- expect target1 to contain 1 row (2,NULL,1)
 select * from target1;
 
 -- note that schema spec for target1 and target2 are different
-from source insert into target1(x,y) select * insert into target2(x,z) select s2,s1;
+from source_n0 insert into target1(x,y) select * insert into target2(x,z) select s2,s1;
 --expect target1 to have 2rows (2,NULL,1), (1,2,NULL)
 select * from target1 order by x,y,z;
 -- expect target2 to have 1 row: (2,NULL,1)
 select * from target2;
 
 
-from source insert into target1(x,y,z) select null as x, * insert into target2(x,y,z) select null as x, source.*;
+from source_n0 insert into target1(x,y,z) select null as x, * insert into target2(x,y,z) select null as x, source_n0.*;
 -- expect target1 to have 3 rows: (2,NULL,1), (1,2,NULL), (NULL, 1,2)
 select * from target1 order by x,y,z;
 -- expect target2 to have 2 rows: (2,NULL,1), (NULL, 1,2)
 select * from target2 order by x,y,z;
 
 create table source2(s1 int, s2 int);
-insert into target3 (x,z) select source.s1,source2.s2 from source left outer join source2 on source.s1=source2.s2;
+insert into target3 (x,z) select source_n0.s1,source2.s2 from source_n0 left outer join source2 on source_n0.s1=source2.s2;
 --expect target3 to have 1 row (1,NULL,NULL)
 select * from target3;
 
 
 -- partitioned tables
-CREATE TABLE pageviews (userid VARCHAR(64), link STRING, source STRING) PARTITIONED BY (datestamp STRING, i int) CLUSTERED BY (userid) INTO 4 BUCKETS STORED AS ORC;
+CREATE TABLE pageviews (userid VARCHAR(64), link STRING, source_n0 STRING) PARTITIONED BY (datestamp STRING, i int) CLUSTERED BY (userid) INTO 4 BUCKETS STORED AS ORC;
 INSERT INTO TABLE pageviews PARTITION (datestamp = '2014-09-23', i = 1)(userid,link) VALUES ('jsmith', 'mail.com');
 -- expect 1 row: ('jsmith', 'mail.com', NULL) in partition '2014-09-23'/'1'
 select * from pageviews;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_into_with_schema2.q b/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
index 032e6ae..9d6fb29 100644
--- a/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
+++ b/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
@@ -3,17 +3,17 @@ set hive.mapred.mode=nonstrict;
 
 
 
-create table studenttab10k (age2 int);
-insert into studenttab10k values(1);
+create table studenttab10k_n0 (age2 int);
+insert into studenttab10k_n0 values(1);
 
 create table student_acid (age int, grade int)
  clustered by (age) into 1 buckets;
 
-insert into student_acid(age) select * from studenttab10k;
+insert into student_acid(age) select * from studenttab10k_n0;
 
 select * from student_acid;
 
-insert into student_acid(grade, age) select 3 g, * from studenttab10k;
+insert into student_acid(grade, age) select 3 g, * from studenttab10k_n0;
 
 select * from student_acid;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q b/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
index 10a1d68..90612a6 100644
--- a/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
+++ b/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
@@ -9,7 +9,7 @@ create table sample_06(name varchar(50), age int, gpa decimal(3, 2)) clustered b
 insert into table sample_06 values ('aaa', 35, 3.00), ('bbb', 32, 3.00), ('ccc', 32, 3.00), ('ddd', 35, 3.00), ('eee', 32, 3.00); 
 select * from sample_06 where gpa = 3.00;
 
-create table tab1 (name varchar(50), age int, gpa decimal(3, 2));
-insert into table tab1 select * from sample_06 where gpa = 3.00;
-select * from tab1;
+create table tab1_n2 (name varchar(50), age int, gpa decimal(3, 2));
+insert into table tab1_n2 select * from sample_06 where gpa = 3.00;
+select * from tab1_n2;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/insert_overwrite_directory.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_overwrite_directory.q b/ql/src/test/queries/clientpositive/insert_overwrite_directory.q
index 1180589..15a00f3 100644
--- a/ql/src/test/queries/clientpositive/insert_overwrite_directory.q
+++ b/ql/src/test/queries/clientpositive/insert_overwrite_directory.q
@@ -10,22 +10,22 @@ select * from src ;
 
 dfs -cat ../../data/files/src_table_2/000000_0;
 
-create table array_table (a array<string>, b array<string>)
+create table array_table_n1 (a array<string>, b array<string>)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '\t'
 COLLECTION ITEMS TERMINATED BY ',';
 
-load data local inpath "../../data/files/array_table.txt" overwrite into table 
array_table;
+load data local inpath "../../data/files/array_table.txt" overwrite into table 
array_table_n1;
 
 insert overwrite directory '../../data/files/array_table_1'
-select * from array_table;
+select * from array_table_n1;
 dfs -cat ../../data/files/array_table_1/000000_0;
 
 insert overwrite directory '../../data/files/array_table_2'
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
-select * from array_table;
+select * from array_table_n1;
 
 dfs -cat ../../data/files/array_table_2/000000_0;
 
@@ -33,22 +33,22 @@ insert overwrite directory '../../data/files/array_table_2_withfields'
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
-select b,a from array_table;
+select b,a from array_table_n1;
 
 dfs -cat ../../data/files/array_table_2_withfields/000000_0;
 
 
-create table map_table (foo STRING , bar MAP<STRING, STRING>)
+create table map_table_n2 (foo STRING , bar MAP<STRING, STRING>)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '\t'
 COLLECTION ITEMS TERMINATED BY ','
 MAP KEYS TERMINATED BY ':'
 STORED AS TEXTFILE;
 
-load data local inpath "../../data/files/map_table.txt" overwrite into table 
map_table;
+load data local inpath "../../data/files/map_table.txt" overwrite into table 
map_table_n2;
 
 insert overwrite directory '../../data/files/map_table_1'
-select * from map_table;
+select * from map_table_n2;
 dfs -cat ../../data/files/map_table_1/000000_0;
 
 insert overwrite directory '../../data/files/map_table_2'
@@ -56,7 +56,7 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
 MAP KEYS TERMINATED BY '='
-select * from map_table;
+select * from map_table_n2;
 
 dfs -cat ../../data/files/map_table_2/000000_0;
 
@@ -65,14 +65,14 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
 MAP KEYS TERMINATED BY '='
-select bar,foo from map_table;
+select bar,foo from map_table_n2;
 
 dfs -cat ../../data/files/map_table_2_withfields/000000_0;
 
 insert overwrite directory '../../data/files/array_table_3'
 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe'
 STORED AS TEXTFILE
-select * from array_table;
+select * from array_table_n1;
 
 dfs -cat ../../data/files/array_table_3/000000_0;
 
@@ -83,14 +83,14 @@ WITH SERDEPROPERTIES (
'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol',
 'quote.delim'= '(\"|\\[|\\])',  'field.delim'=',',
 'serialization.null.format'='-NA-', 'collection.delim'='#') STORED AS TEXTFILE
-select a, null, b from array_table;
+select a, null, b from array_table_n1;
 
 dfs -cat ../../data/files/array_table_4/000000_0;
 
 insert overwrite directory '../../data/files/map_table_3'
 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe'
 STORED AS TEXTFILE
-select * from map_table;
+select * from map_table_n2;
 
 dfs -cat ../../data/files/map_table_3/000000_0;
 
@@ -100,7 +100,7 @@ WITH SERDEPROPERTIES (
'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol',
 'quote.delim'= '(\"|\\[|\\])',  'field.delim'=':',
'serialization.null.format'='-NA-', 'collection.delim'='#', 'mapkey.delim'='%') STORED AS TEXTFILE
-select foo, null, bar from map_table;
+select foo, null, bar from map_table_n2;
 
 dfs -cat ../../data/files/map_table_4/000000_0;
 
@@ -125,8 +125,8 @@ select key,value from rctable;
 dfs -cat ../../data/files/rctable_out/000000_0;
 
 drop table rctable;
-drop table array_table;
-drop table map_table;
+drop table array_table_n1;
+drop table map_table_n2;
 dfs -rmr ${system:test.tmp.dir}/rctable;
 dfs -rmr ../../data/files/array_table_1;
 dfs -rmr ../../data/files/array_table_2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/insert_overwrite_directory2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_overwrite_directory2.q b/ql/src/test/queries/clientpositive/insert_overwrite_directory2.q
index b58fb44..c8d8b1c 100644
--- a/ql/src/test/queries/clientpositive/insert_overwrite_directory2.q
+++ b/ql/src/test/queries/clientpositive/insert_overwrite_directory2.q
@@ -1,18 +1,18 @@
 --! qt:dataset:src
-create external table result(key string) location "${system:test.tmp.dir}/result";
+create external table result_n0(key string) location "${system:test.tmp.dir}/result_n0";
 
 set mapreduce.job.reduces=2;
 
-insert overwrite directory "${system:test.tmp.dir}/result"
+insert overwrite directory "${system:test.tmp.dir}/result_n0"
 select key from src group by key;
 
-select count(*) from result;
+select count(*) from result_n0;
 
 set mapreduce.job.reduces=1;
 
-insert overwrite directory "${system:test.tmp.dir}/result"
+insert overwrite directory "${system:test.tmp.dir}/result_n0"
 select key from src group by key;
 
-select count(*) from result;
+select count(*) from result_n0;
 
-drop table result;
\ No newline at end of file
+drop table result_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q b/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q
index 9fd8d15..ce7912c 100644
--- a/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q
+++ b/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q
@@ -10,22 +10,22 @@ select * from src ;
 
 dfs -cat ../../data/files/local_src_table_2/000000_0;
 
-create table array_table (a array<string>, b array<string>)
+create table array_table_n0 (a array<string>, b array<string>)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '\t'
 COLLECTION ITEMS TERMINATED BY ',';
 
-load data local inpath "../../data/files/array_table.txt" overwrite into table 
array_table;
+load data local inpath "../../data/files/array_table.txt" overwrite into table 
array_table_n0;
 
 insert overwrite local directory '../../data/files/local_array_table_1'
-select * from array_table;
+select * from array_table_n0;
 dfs -cat ../../data/files/local_array_table_1/000000_0;
 
 insert overwrite local directory '../../data/files/local_array_table_2'
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
-select * from array_table;
+select * from array_table_n0;
 
 dfs -cat ../../data/files/local_array_table_2/000000_0;
 
@@ -33,22 +33,22 @@ insert overwrite local directory '../../data/files/local_array_table_2_withfield
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
-select b,a from array_table;
+select b,a from array_table_n0;
 
 dfs -cat ../../data/files/local_array_table_2_withfields/000000_0;
 
 
-create table map_table (foo STRING , bar MAP<STRING, STRING>)
+create table map_table_n1 (foo STRING , bar MAP<STRING, STRING>)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '\t'
 COLLECTION ITEMS TERMINATED BY ','
 MAP KEYS TERMINATED BY ':'
 STORED AS TEXTFILE;
 
-load data local inpath "../../data/files/map_table.txt" overwrite into table 
map_table;
+load data local inpath "../../data/files/map_table.txt" overwrite into table 
map_table_n1;
 
 insert overwrite local directory '../../data/files/local_map_table_1'
-select * from map_table;
+select * from map_table_n1;
 dfs -cat ../../data/files/local_map_table_1/000000_0;
 
 insert overwrite local directory '../../data/files/local_map_table_2'
@@ -56,7 +56,7 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
 MAP KEYS TERMINATED BY '='
-select * from map_table;
+select * from map_table_n1;
 
 dfs -cat ../../data/files/local_map_table_2/000000_0;
 
@@ -65,21 +65,21 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
 MAP KEYS TERMINATED BY '='
-select bar,foo from map_table;
+select bar,foo from map_table_n1;
 
 dfs -cat ../../data/files/local_map_table_2_withfields/000000_0;
 
 insert overwrite local directory '../../data/files/local_array_table_3'
 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe'
 STORED AS TEXTFILE
-select * from array_table;
+select * from array_table_n0;
 
 dfs -cat ../../data/files/local_array_table_3/000000_0;
 
 insert overwrite local directory '../../data/files/local_map_table_3'
 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe'
 STORED AS TEXTFILE
-select * from map_table;
+select * from map_table_n1;
 
 dfs -cat ../../data/files/local_map_table_3/000000_0;
 
@@ -104,7 +104,7 @@ select key,value from local_rctable;
 dfs -cat ../../data/files/local_rctable_out/000000_0;
 
 drop table local_rctable;
-drop table array_table;
-drop table map_table;
+drop table array_table_n0;
+drop table map_table_n1;
 dfs -rmr ${system:test.tmp.dir}/local_rctable;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/insert_values_orig_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_orig_table.q b/ql/src/test/queries/clientpositive/insert_values_orig_table.q
index a0fce90..92a2df4 100644
--- a/ql/src/test/queries/clientpositive/insert_values_orig_table.q
+++ b/ql/src/test/queries/clientpositive/insert_values_orig_table.q
@@ -3,8 +3,8 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
-drop table if exists acid_ivot_stage;
-create table acid_ivot_stage(
+drop table if exists acid_ivot_stage_n0;
+create table acid_ivot_stage_n0(
     ctinyint TINYINT,
     csmallint SMALLINT,
     cint INT,
@@ -17,9 +17,9 @@ create table acid_ivot_stage(
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
     cboolean2 BOOLEAN) stored as orc;
-LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table 
acid_ivot_stage;
+LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table 
acid_ivot_stage_n0;
 
-create table acid_ivot(
+create table acid_ivot_n0(
     ctinyint TINYINT,
     csmallint SMALLINT,
     cint INT,
@@ -33,13 +33,13 @@ create table acid_ivot(
     cboolean1 BOOLEAN,
     cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
-insert into acid_ivot select * from acid_ivot_stage;
+insert into acid_ivot_n0 select * from acid_ivot_stage_n0;
 
-select count(*) from acid_ivot;
+select count(*) from acid_ivot_n0;
 
-insert into table acid_ivot values
+insert into table acid_ivot_n0 values
         (1, 2, 3, 4, 3.14, 2.34, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true),
         (111, 222, 3333, 444, 13.14, 10239302.34239320, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true);
 
-select count(*) from acid_ivot;
+select count(*) from acid_ivot_n0;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/insertexternal1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insertexternal1.q b/ql/src/test/queries/clientpositive/insertexternal1.q
index 2e938b0..6e3a235 100644
--- a/ql/src/test/queries/clientpositive/insertexternal1.q
+++ b/ql/src/test/queries/clientpositive/insertexternal1.q
@@ -1,15 +1,15 @@
 --! qt:dataset:src
 
 
-create table texternal(key string, val string) partitioned by (insertdate string);
+create table texternal_n0(key string, val string) partitioned by (insertdate string);
 
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/texternal/temp;
 dfs -rmr ${system:test.tmp.dir}/texternal;
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/texternal/2008-01-01;
 
-alter table texternal add partition (insertdate='2008-01-01') location 'pfile://${system:test.tmp.dir}/texternal/2008-01-01';
-from src insert overwrite table texternal partition (insertdate='2008-01-01') select *;
+alter table texternal_n0 add partition (insertdate='2008-01-01') location 'pfile://${system:test.tmp.dir}/texternal/2008-01-01';
+from src insert overwrite table texternal_n0 partition (insertdate='2008-01-01') select *;
 
-select * from texternal where insertdate='2008-01-01';
+select * from texternal_n0 where insertdate='2008-01-01';
 
 dfs -rmr ${system:test.tmp.dir}/texternal;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/intersect_all.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/intersect_all.q b/ql/src/test/queries/clientpositive/intersect_all.q
index 35033be..f97a4bc 100644
--- a/ql/src/test/queries/clientpositive/intersect_all.q
+++ b/ql/src/test/queries/clientpositive/intersect_all.q
@@ -2,33 +2,33 @@
 set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
 
-create table a(key int, value int);
+create table a_n10(key int, value int);
 
-insert into table a values (1,2),(1,2),(1,3),(2,3);
+insert into table a_n10 values (1,2),(1,2),(1,3),(2,3);
 
-create table b(key int, value int);
+create table b_n8(key int, value int);
 
-insert into table b values (1,2),(2,3);
+insert into table b_n8 values (1,2),(2,3);
 
-select key, value, count(1) as c from a group by key, value;
+select key, value, count(1) as c from a_n10 group by key, value;
 
-select * from a intersect all select * from b;
+select * from a_n10 intersect all select * from b_n8;
 
-select * from b intersect all select * from a intersect all select * from b;
+select * from b_n8 intersect all select * from a_n10 intersect all select * from b_n8;
 
-select * from a intersect all select * from b union all select * from a intersect all select * from b;
+select * from a_n10 intersect all select * from b_n8 union all select * from a_n10 intersect all select * from b_n8;
 
-select * from a intersect all select * from b union select * from a intersect all select * from b;
+select * from a_n10 intersect all select * from b_n8 union select * from a_n10 intersect all select * from b_n8;
 
-select * from a intersect all select * from b intersect all select * from a intersect all select * from b;
+select * from a_n10 intersect all select * from b_n8 intersect all select * from a_n10 intersect all select * from b_n8;
 
-select * from (select a.key, b.value from a join b on a.key=b.key)sub1 
+select * from (select a_n10.key, b_n8.value from a_n10 join b_n8 on a_n10.key=b_n8.key)sub1 
 intersect all 
-select * from (select a.key, b.value from a join b on a.key=b.key)sub2; 
+select * from (select a_n10.key, b_n8.value from a_n10 join b_n8 on a_n10.key=b_n8.key)sub2; 
 
-select * from (select a.key, b.value from a join b on a.key=b.key)sub1
+select * from (select a_n10.key, b_n8.value from a_n10 join b_n8 on a_n10.key=b_n8.key)sub1
 intersect all
-select * from (select b.value as key, a.key as value from a join b on a.key=b.key)sub2;
+select * from (select b_n8.value as key, a_n10.key as value from a_n10 join b_n8 on a_n10.key=b_n8.key)sub2;
 
 explain select * from src intersect all select * from src;
 
@@ -38,6 +38,6 @@ explain select * from src intersect all select * from src intersect all select *
 
 select * from src intersect all select * from src intersect all select * from src intersect all select * from src;
 
-explain select value from a group by value intersect all select key from b group by key;
+explain select value from a_n10 group by value intersect all select key from b_n8 group by key;
 
-select value from a group by value intersect all select key from b group by key;
+select value from a_n10 group by value intersect all select key from b_n8 group by key;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/intersect_distinct.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/intersect_distinct.q b/ql/src/test/queries/clientpositive/intersect_distinct.q
index 78b515d..221c2f7 100644
--- a/ql/src/test/queries/clientpositive/intersect_distinct.q
+++ b/ql/src/test/queries/clientpositive/intersect_distinct.q
@@ -2,33 +2,33 @@
 set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
 
-create table a(key int, value int);
+create table a_n17(key int, value int);
 
-insert into table a values (1,2),(1,2),(1,3),(2,3);
+insert into table a_n17 values (1,2),(1,2),(1,3),(2,3);
 
-create table b(key int, value int);
+create table b_n13(key int, value int);
 
-insert into table b values (1,2),(2,3);
+insert into table b_n13 values (1,2),(2,3);
 
-select key, count(1) as c from a group by key intersect all select value, max(key) as c from b group by value;
+select key, count(1) as c from a_n17 group by key intersect all select value, max(key) as c from b_n13 group by value;
 
-select * from a intersect distinct select * from b;
+select * from a_n17 intersect distinct select * from b_n13;
 
-select * from b intersect distinct select * from a intersect distinct select * from b;
+select * from b_n13 intersect distinct select * from a_n17 intersect distinct select * from b_n13;
 
-select * from a intersect distinct select * from b union all select * from a intersect distinct select * from b;
+select * from a_n17 intersect distinct select * from b_n13 union all select * from a_n17 intersect distinct select * from b_n13;
 
-select * from a intersect distinct select * from b union select * from a intersect distinct select * from b;
+select * from a_n17 intersect distinct select * from b_n13 union select * from a_n17 intersect distinct select * from b_n13;
 
-select * from a intersect distinct select * from b intersect distinct select * from a intersect distinct select * from b;
+select * from a_n17 intersect distinct select * from b_n13 intersect distinct select * from a_n17 intersect distinct select * from b_n13;
 
-select * from (select a.key, b.value from a join b on a.key=b.key)sub1 
+select * from (select a_n17.key, b_n13.value from a_n17 join b_n13 on a_n17.key=b_n13.key)sub1 
 intersect distinct 
-select * from (select a.key, b.value from a join b on a.key=b.key)sub2; 
+select * from (select a_n17.key, b_n13.value from a_n17 join b_n13 on a_n17.key=b_n13.key)sub2; 
 
-select * from (select a.key, b.value from a join b on a.key=b.key)sub1
+select * from (select a_n17.key, b_n13.value from a_n17 join b_n13 on a_n17.key=b_n13.key)sub1
 intersect distinct
-select * from (select b.value as key, a.key as value from a join b on a.key=b.key)sub2;
+select * from (select b_n13.value as key, a_n17.key as value from a_n17 join b_n13 on a_n17.key=b_n13.key)sub2;
 
 explain select * from src intersect distinct select * from src;
 
@@ -38,6 +38,6 @@ explain select * from src intersect distinct select * from src intersect distinc
 
 select * from src intersect distinct select * from src intersect distinct select * from src intersect distinct select * from src;
 
-explain select value from a group by value intersect distinct select key from b group by key;
+explain select value from a_n17 group by value intersect distinct select key from b_n13 group by key;
 
-select value from a group by value intersect distinct select key from b group by key;
+select value from a_n17 group by value intersect distinct select key from b_n13 group by key;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/intersect_merge.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/intersect_merge.q b/ql/src/test/queries/clientpositive/intersect_merge.q
index 0d8789e..7fd1267 100644
--- a/ql/src/test/queries/clientpositive/intersect_merge.q
+++ b/ql/src/test/queries/clientpositive/intersect_merge.q
@@ -1,27 +1,27 @@
 set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
 
-create table a(key int, value int);
+create table a_n7(key int, value int);
 
-insert into table a values (1,2),(1,2),(1,3),(2,3);
+insert into table a_n7 values (1,2),(1,2),(1,3),(2,3);
 
-create table b(key int, value int);
+create table b_n5(key int, value int);
 
-insert into table b values (1,2),(2,3);
+insert into table b_n5 values (1,2),(2,3);
 
-explain select * from b intersect distinct select * from a intersect distinct select * from b intersect distinct select * from a intersect distinct select * from b;
+explain select * from b_n5 intersect distinct select * from a_n7 intersect distinct select * from b_n5 intersect distinct select * from a_n7 intersect distinct select * from b_n5;
 
-explain (select * from b intersect distinct select * from a) intersect distinct (select * from b intersect distinct select * from a);
+explain (select * from b_n5 intersect distinct select * from a_n7) intersect distinct (select * from b_n5 intersect distinct select * from a_n7);
 
-explain select * from b intersect distinct (select * from a intersect distinct (select * from b intersect distinct (select * from a intersect distinct select * from b)));
+explain select * from b_n5 intersect distinct (select * from a_n7 intersect distinct (select * from b_n5 intersect distinct (select * from a_n7 intersect distinct select * from b_n5)));
 
-explain (((select * from b intersect distinct select * from a) intersect distinct select * from b) intersect distinct select * from a) intersect distinct select * from b;
+explain (((select * from b_n5 intersect distinct select * from a_n7) intersect distinct select * from b_n5) intersect distinct select * from a_n7) intersect distinct select * from b_n5;
 
-explain select * from b intersect distinct (select * from a intersect distinct select * from b) intersect distinct select * from a intersect distinct select * from b;
+explain select * from b_n5 intersect distinct (select * from a_n7 intersect distinct select * from b_n5) intersect distinct select * from a_n7 intersect distinct select * from b_n5;
 
-explain select * from b intersect distinct (select * from a intersect all select * from b);
+explain select * from b_n5 intersect distinct (select * from a_n7 intersect all select * from b_n5);
 
-explain select * from b intersect all (select * from a intersect all select * from b);
+explain select * from b_n5 intersect all (select * from a_n7 intersect all select * from b_n5);
 
-explain select * from b intersect all (select * from a intersect distinct select * from b);
+explain select * from b_n5 intersect all (select * from a_n7 intersect distinct select * from b_n5);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/interval_alt.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/interval_alt.q b/ql/src/test/queries/clientpositive/interval_alt.q
index 824e5d3..6711780 100644
--- a/ql/src/test/queries/clientpositive/interval_alt.q
+++ b/ql/src/test/queries/clientpositive/interval_alt.q
@@ -21,8 +21,8 @@ select
 select date '2012-01-01' + 30 days;
 select date '2012-01-01' - 30 days;
 
-create table t (dt int);
-insert into t values (1),(2);
+create table t_n18 (dt int);
+insert into t_n18 values (1),(2);
 
 -- expressions/columnref
 explain
@@ -31,11 +31,11 @@ select
        date '2012-01-01' - interval (-dt*dt) day,
        date '2012-01-01' + 1 day + '2' days,
        date '2012-01-01' + interval (dt || '-1') year to month
-       from t;
+       from t_n18;
 
 select
         date '2012-01-01' + interval (-dt*dt) day,
         date '2012-01-01' - interval (-dt*dt) day,
         date '2012-01-01' + 1 day + '2' days,
         date '2012-01-01' + interval (dt || '-1') year to month
-        from t;
+        from t_n18;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/interval_arithmetic.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/interval_arithmetic.q b/ql/src/test/queries/clientpositive/interval_arithmetic.q
index 7261311..09b3723 100644
--- a/ql/src/test/queries/clientpositive/interval_arithmetic.q
+++ b/ql/src/test/queries/clientpositive/interval_arithmetic.q
@@ -1,6 +1,6 @@
 --! qt:dataset:alltypesorc
-create table interval_arithmetic_1 (dateval date, tsval timestamp);
-insert overwrite table interval_arithmetic_1
+create table interval_arithmetic_1_n0 (dateval date, tsval timestamp);
+insert overwrite table interval_arithmetic_1_n0
   select cast(ctimestamp1 as date), ctimestamp1 from alltypesorc;
 
 -- interval year-month arithmetic
@@ -13,7 +13,7 @@ select
   dateval + interval '-2-2' year to month,
   - interval '2-2' year to month + dateval,
   interval '2-2' year to month + dateval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 select
@@ -24,7 +24,7 @@ select
   dateval + interval '-2-2' year to month,
   - interval '2-2' year to month + dateval,
   interval '2-2' year to month + dateval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 explain
@@ -33,7 +33,7 @@ select
   dateval - date '1999-06-07',
   date '1999-06-07' - dateval,
   dateval - dateval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 select
@@ -41,7 +41,7 @@ select
   dateval - date '1999-06-07',
   date '1999-06-07' - dateval,
   dateval - dateval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 explain
@@ -53,7 +53,7 @@ select
   tsval + interval '-2-2' year to month,
   - interval '2-2' year to month + tsval,
   interval '2-2' year to month + tsval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 select
@@ -64,20 +64,20 @@ select
   tsval + interval '-2-2' year to month,
   - interval '2-2' year to month + tsval,
   interval '2-2' year to month + tsval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 explain
 select
   interval '2-2' year to month + interval '3-3' year to month,
   interval '2-2' year to month - interval '3-3' year to month
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 select
   interval '2-2' year to month + interval '3-3' year to month,
   interval '2-2' year to month - interval '3-3' year to month
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 
@@ -91,7 +91,7 @@ select
   dateval + interval '-99 11:22:33.123456789' day to second,
   -interval '99 11:22:33.123456789' day to second + dateval,
   interval '99 11:22:33.123456789' day to second + dateval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 select
@@ -102,7 +102,7 @@ select
   dateval + interval '-99 11:22:33.123456789' day to second,
   -interval '99 11:22:33.123456789' day to second + dateval,
   interval '99 11:22:33.123456789' day to second + dateval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 explain
@@ -112,7 +112,7 @@ select
   dateval - tsval,
   tsval - dateval,
   tsval - tsval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 select
@@ -121,7 +121,7 @@ select
   dateval - tsval,
   tsval - dateval,
   tsval - tsval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 explain
@@ -133,7 +133,7 @@ select
   tsval + interval '-99 11:22:33.123456789' day to second,
   -interval '99 11:22:33.123456789' day to second + tsval,
   interval '99 11:22:33.123456789' day to second + tsval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 select
@@ -144,23 +144,23 @@ select
   tsval + interval '-99 11:22:33.123456789' day to second,
   -interval '99 11:22:33.123456789' day to second + tsval,
   interval '99 11:22:33.123456789' day to second + tsval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 explain
 select
   interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second,
   interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 select
   interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second,
   interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
 
 explain
-select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1 limit 1;
-select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1 limit 1;
-drop table interval_arithmetic_1;
+select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1_n0 limit 1;
+select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1_n0 limit 1;
+drop table interval_arithmetic_1_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/is_distinct_from.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/is_distinct_from.q b/ql/src/test/queries/clientpositive/is_distinct_from.q
index f135f4e..cf417a7 100644
--- a/ql/src/test/queries/clientpositive/is_distinct_from.q
+++ b/ql/src/test/queries/clientpositive/is_distinct_from.q
@@ -23,19 +23,19 @@ select 1 is not distinct from 1,
                null is not distinct from null
          from part;
 
-create table test(x string, y string);
-insert into test values ('q', 'q'), ('q', 'w'), (NULL, 'q'), ('q', NULL), (NULL, NULL);
-select *, x is not distinct from y, not (x is not distinct from y), (x is distinct from y) = true from test;
+create table test_n5(x string, y string);
+insert into test_n5 values ('q', 'q'), ('q', 'w'), (NULL, 'q'), ('q', NULL), (NULL, NULL);
+select *, x is not distinct from y, not (x is not distinct from y), (x is distinct from y) = true from test_n5;
 
-select *, x||y is not distinct from y||x, not (x||y||x is not distinct from y||x||x) from test;
+select *, x||y is not distinct from y||x, not (x||y||x is not distinct from y||x||x) from test_n5;
 
 -- where
-explain select * from test where y is distinct from null;
-select * from test where y is distinct from null;
+explain select * from test_n5 where y is distinct from null;
+select * from test_n5 where y is distinct from null;
 
-explain select * from test where y is not distinct from null;
-select * from test where y is not distinct from null;
-drop table test;
+explain select * from test_n5 where y is not distinct from null;
+select * from test_n5 where y is not distinct from null;
+drop table test_n5;
 
 -- where
 explain select * from part where p_size is distinct from 2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join1.q b/ql/src/test/queries/clientpositive/join1.q
index 25759a8..a5e0123 100644
--- a/ql/src/test/queries/clientpositive/join1.q
+++ b/ql/src/test/queries/clientpositive/join1.q
@@ -6,13 +6,13 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n15(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;
+INSERT OVERWRITE TABLE dest_j1_n15 SELECT src1.key, src2.value;
 
 FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;
+INSERT OVERWRITE TABLE dest_j1_n15 SELECT src1.key, src2.value;
 
-SELECT dest_j1.* FROM dest_j1;
+SELECT dest_j1_n15.* FROM dest_j1_n15;

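The FROM-first form used throughout these tests is Hive's multi-insert syntax: the source (here a self-join of src) is scanned once and may feed several INSERT clauses in the same statement. A minimal sketch, reusing dest_j1_n15 from the diff plus a second, purely hypothetical target table:

-- dest_j1_copy is hypothetical, shown only to illustrate the multi-insert form
CREATE TABLE dest_j1_copy(key INT, value STRING) STORED AS TEXTFILE;

FROM src src1 JOIN src src2 ON (src1.key = src2.key)
INSERT OVERWRITE TABLE dest_j1_n15  SELECT src1.key, src2.value
INSERT OVERWRITE TABLE dest_j1_copy SELECT src2.key, src1.value;
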
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join14.q 
b/ql/src/test/queries/clientpositive/join14.q
index d233b42..e0f725c 100644
--- a/ql/src/test/queries/clientpositive/join14.q
+++ b/ql/src/test/queries/clientpositive/join14.q
@@ -4,7 +4,7 @@ set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
 
-CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n164(c1 INT, c2 STRING) STORED AS TEXTFILE;
 
 set mapreduce.framework.name=yarn;
 set mapreduce.jobtracker.address=localhost:58;
@@ -13,9 +13,9 @@ set hive.exec.mode.local.auto.input.files.max=6;
 
 EXPLAIN
 FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' 
and src.key > 100
-INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value;
+INSERT OVERWRITE TABLE dest1_n164 SELECT src.key, srcpart.value;
 
 FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' 
and src.key > 100
-INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value;
+INSERT OVERWRITE TABLE dest1_n164 SELECT src.key, srcpart.value;
 
-select dest1.* from dest1;
+select dest1_n164.* from dest1_n164;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join14_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join14_hadoop20.q 
b/ql/src/test/queries/clientpositive/join14_hadoop20.q
index e9107b5..489ad0c 100644
--- a/ql/src/test/queries/clientpositive/join14_hadoop20.q
+++ b/ql/src/test/queries/clientpositive/join14_hadoop20.q
@@ -2,16 +2,16 @@
 --! qt:dataset:src
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
 
-CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n49(c1 INT, c2 STRING) STORED AS TEXTFILE;
 
 set mapred.job.tracker=localhost:58;
 set hive.exec.mode.local.auto=true;
 
 EXPLAIN
 FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' 
and src.key > 100
-INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value;
+INSERT OVERWRITE TABLE dest1_n49 SELECT src.key, srcpart.value;
 
 FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' 
and src.key > 100
-INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value;
+INSERT OVERWRITE TABLE dest1_n49 SELECT src.key, srcpart.value;
 
-select dest1.* from dest1;
+select dest1_n49.* from dest1_n49;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join17.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join17.q 
b/ql/src/test/queries/clientpositive/join17.q
index a62b004..f62fa54 100644
--- a/ql/src/test/queries/clientpositive/join17.q
+++ b/ql/src/test/queries/clientpositive/join17.q
@@ -3,13 +3,13 @@
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS 
TEXTFILE;
+CREATE TABLE dest1_n121(key1 INT, value1 STRING, key2 INT, value2 STRING) 
STORED AS TEXTFILE;
 
 EXPLAIN EXTENDED
 FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*;
+INSERT OVERWRITE TABLE dest1_n121 SELECT src1.*, src2.*;
 
 FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*;
+INSERT OVERWRITE TABLE dest1_n121 SELECT src1.*, src2.*;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n121.* FROM dest1_n121;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join2.q 
b/ql/src/test/queries/clientpositive/join2.q
index 074255bb..f1416de 100644
--- a/ql/src/test/queries/clientpositive/join2.q
+++ b/ql/src/test/queries/clientpositive/join2.q
@@ -5,13 +5,13 @@ set hive.stats.column.autogather=false;
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j2_n2(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON 
(src1.key + src2.key = src3.key)
-INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value;
+INSERT OVERWRITE TABLE dest_j2_n2 SELECT src1.key, src3.value;
 
 FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON 
(src1.key + src2.key = src3.key)
-INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value;
+INSERT OVERWRITE TABLE dest_j2_n2 SELECT src1.key, src3.value;
 
-SELECT dest_j2.* FROM dest_j2;
+SELECT dest_j2_n2.* FROM dest_j2_n2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join25.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join25.q 
b/ql/src/test/queries/clientpositive/join25.q
index 18eecf5..3b888ad 100644
--- a/ql/src/test/queries/clientpositive/join25.q
+++ b/ql/src/test/queries/clientpositive/join25.q
@@ -2,18 +2,18 @@
 --! qt:dataset:src
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n18(key INT, value STRING, val2 STRING) STORED AS 
TEXTFILE;
 set hive.auto.convert.join=true;
 EXPLAIN
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n18 
 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key);
 
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n18 
 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key);
 
-select * from dest_j1 x;
+select * from dest_j1_n18 x;
 
 
 

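The /*+ MAPJOIN(x) */ hint asks Hive to build an in-memory hash table from the hinted side (src1, aliased as x) and stream the larger table through the mappers, skipping the shuffle stage; with hive.auto.convert.join=true, as set in this test, Hive can reach the same plan on its own from table sizes. A hedged sketch of the equivalent hint-free query:

set hive.auto.convert.join=true;

-- src1 is small, so it is expected to become the hash-table (build) side automatically
INSERT OVERWRITE TABLE dest_j1_n18
SELECT x.key, x.value, y.value
FROM src1 x JOIN src y ON (x.key = y.key);
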
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join26.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join26.q 
b/ql/src/test/queries/clientpositive/join26.q
index bc1e7af..d2bb97b 100644
--- a/ql/src/test/queries/clientpositive/join26.q
+++ b/ql/src/test/queries/clientpositive/join26.q
@@ -3,21 +3,21 @@
 --! qt:dataset:src
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n10(key STRING, value STRING, val2 STRING) STORED AS 
TEXTFILE;
 
 set hive.auto.convert.join=true;
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n10
 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key) 
 JOIN srcpart z ON (x.key = z.key and z.ds='2008-04-08' and z.hr=11);
 
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n10
 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key) 
 JOIN srcpart z ON (x.key = z.key and z.ds='2008-04-08' and z.hr=11);
 
-select * from dest_j1 x;
+select * from dest_j1_n10 x;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join27.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join27.q 
b/ql/src/test/queries/clientpositive/join27.q
index fb2d89c..e1c2560 100644
--- a/ql/src/test/queries/clientpositive/join27.q
+++ b/ql/src/test/queries/clientpositive/join27.q
@@ -2,15 +2,15 @@
 --! qt:dataset:src
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n2(key INT, value STRING, val2 STRING) STORED AS TEXTFILE;
 set hive.auto.convert.join=true;
 EXPLAIN
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n2 
 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value
 FROM src1 x JOIN src y ON (x.value = y.value);
 
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n2 
 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value
 FROM src1 x JOIN src y ON (x.value = y.value);
 
-select * from dest_j1;
+select * from dest_j1_n2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join28.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join28.q 
b/ql/src/test/queries/clientpositive/join28.q
index 456e21f..868ce9d 100644
--- a/ql/src/test/queries/clientpositive/join28.q
+++ b/ql/src/test/queries/clientpositive/join28.q
@@ -4,7 +4,7 @@
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n11(key STRING, value STRING) STORED AS TEXTFILE;
 
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
@@ -13,21 +13,21 @@ set hive.auto.convert.join.noconditionaltask.size=10000;
 -- Since the inputs are small, it should be automatically converted to mapjoin
 
 EXPLAIN
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n11 
 SELECT subq.key1, z.value
 FROM
 (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 
  FROM src1 x JOIN src y ON (x.key = y.key)) subq
  JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11);
 
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n11 
 SELECT subq.key1, z.value
 FROM
 (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 
  FROM src1 x JOIN src y ON (x.key = y.key)) subq
  JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11);
 
-select * from dest_j1;
+select * from dest_j1_n11;
 
 
 

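hive.auto.convert.join.noconditionaltask.size, set to 10000 in several of these tests, is the size threshold (in bytes) under which Hive converts a join directly into a map join without planning a conditional fallback task; the "inputs are small" comments rely on src, src1 and srcpart staying below it. A rough sketch of how such a test typically pins the behaviour before running EXPLAIN:

set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
-- combined size of the small tables must stay under this many bytes
set hive.auto.convert.join.noconditionaltask.size=10000;

EXPLAIN
SELECT x.key, z.value
FROM src1 x JOIN srcpart z
  ON (x.key = z.key AND z.ds = '2008-04-08' AND z.hr = 11);
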
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join29.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join29.q 
b/ql/src/test/queries/clientpositive/join29.q
index d86eb03..20aadbb 100644
--- a/ql/src/test/queries/clientpositive/join29.q
+++ b/ql/src/test/queries/clientpositive/join29.q
@@ -3,7 +3,7 @@
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT);
+CREATE TABLE dest_j1_n6(key STRING, cnt1 INT, cnt2 INT);
 
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
@@ -12,14 +12,14 @@ set hive.auto.convert.join.noconditionaltask.size=10000;
 -- Since the inputs are small, it should be automatically converted to mapjoin
 
 EXPLAIN
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n6 
 SELECT subq1.key, subq1.cnt, subq2.cnt
 FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN 
      (select y.key, count(1) as cnt from src y group by y.key) subq2 ON 
(subq1.key = subq2.key);
 
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n6 
 SELECT subq1.key, subq1.cnt, subq2.cnt
 FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN 
      (select y.key, count(1) as cnt from src y group by y.key) subq2 ON 
(subq1.key = subq2.key);
 
-select * from dest_j1;
+select * from dest_j1_n6;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join3.q 
b/ql/src/test/queries/clientpositive/join3.q
index da7cfa1..ba4b21c 100644
--- a/ql/src/test/queries/clientpositive/join3.q
+++ b/ql/src/test/queries/clientpositive/join3.q
@@ -3,13 +3,13 @@
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n46(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON 
(src1.key = src3.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value;
+INSERT OVERWRITE TABLE dest1_n46 SELECT src1.key, src3.value;
 
 FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON 
(src1.key = src3.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value;
+INSERT OVERWRITE TABLE dest1_n46 SELECT src1.key, src3.value;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n46.* FROM dest1_n46;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join30.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join30.q 
b/ql/src/test/queries/clientpositive/join30.q
index ec2bae6..9c0ecaf 100644
--- a/ql/src/test/queries/clientpositive/join30.q
+++ b/ql/src/test/queries/clientpositive/join30.q
@@ -2,13 +2,13 @@
 --! qt:dataset:src
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key INT, cnt INT);
+CREATE TABLE dest_j1_n0(key INT, cnt INT);
 set hive.auto.convert.join=true;
 EXPLAIN
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n0 
 SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = 
y.key) group by x.key;
 
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n0 
 SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = 
y.key) group by x.key;
 
-select * from dest_j1;
+select * from dest_j1_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join31.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join31.q 
b/ql/src/test/queries/clientpositive/join31.q
index 4fbf204..2083407 100644
--- a/ql/src/test/queries/clientpositive/join31.q
+++ b/ql/src/test/queries/clientpositive/join31.q
@@ -4,7 +4,7 @@ set hive.mapred.mode=nonstrict;
 set hive.optimize.semijoin.conversion=true;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key STRING, cnt INT);
+CREATE TABLE dest_j1_n22(key STRING, cnt INT);
 
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
@@ -13,16 +13,16 @@ set hive.auto.convert.join.noconditionaltask.size=10000;
 -- Since the inputs are small, it should be automatically converted to mapjoin
 
 EXPLAIN 
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n22 
 SELECT subq1.key, count(1) as cnt
 FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN 
      (select y.key, count(1) as cnt from src y group by y.key) subq2 ON 
(subq1.key = subq2.key)
 group by subq1.key;
 
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n22 
 SELECT subq1.key, count(1) as cnt
 FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN 
      (select y.key, count(1) as cnt from src y group by y.key) subq2 ON 
(subq1.key = subq2.key)
 group by subq1.key;
 
-select * from dest_j1;
+select * from dest_j1_n22;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join32.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join32.q 
b/ql/src/test/queries/clientpositive/join32.q
index 9d3d645..c55e730 100644
--- a/ql/src/test/queries/clientpositive/join32.q
+++ b/ql/src/test/queries/clientpositive/join32.q
@@ -4,7 +4,7 @@
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n12(key STRING, value STRING, val2 STRING) STORED AS 
TEXTFILE;
 
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
@@ -13,17 +13,17 @@ set hive.auto.convert.join.noconditionaltask.size=10000;
 -- Since the inputs are small, it should be automatically converted to mapjoin
 
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n12
 SELECT x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key) 
 JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11);
 
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n12
 SELECT x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key) 
 JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11);
 
-select * from dest_j1;
+select * from dest_j1_n12;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join32_lessSize.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join32_lessSize.q 
b/ql/src/test/queries/clientpositive/join32_lessSize.q
index 6114300..229ba56 100644
--- a/ql/src/test/queries/clientpositive/join32_lessSize.q
+++ b/ql/src/test/queries/clientpositive/join32_lessSize.q
@@ -4,8 +4,8 @@
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE;
-CREATE TABLE dest_j2(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n21(key STRING, value STRING, val2 STRING) STORED AS 
TEXTFILE;
+CREATE TABLE dest_j2_n1(key STRING, value STRING, val2 STRING) STORED AS 
TEXTFILE;
 
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
@@ -14,81 +14,81 @@ set hive.auto.convert.join.noconditionaltask.size=6000;
 -- Since the inputs are small, it should be automatically converted to mapjoin
 
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n21
 SELECT x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key) 
 JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11);
 
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n21
 SELECT x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key) 
 JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11);
 
-select * from dest_j1;
+select * from dest_j1_n21;
 
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n21
 SELECT x.key, z.value, y.value
 FROM src w JOIN src1 x ON (x.value = w.value) 
 JOIN src y ON (x.key = y.key) 
 JOIN src1 z ON (x.key = z.key);
 
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n21
 SELECT x.key, z.value, y.value
 FROM src w JOIN src1 x ON (x.value = w.value) 
 JOIN src y ON (x.key = y.key) 
 JOIN src1 z ON (x.key = z.key);
 
-select * from dest_j1;
+select * from dest_j1_n21;
 
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j2
+INSERT OVERWRITE TABLE dest_j2_n1
 SELECT res.key, z.value, res.value
 FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res 
 JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11);
 
-INSERT OVERWRITE TABLE dest_j2
+INSERT OVERWRITE TABLE dest_j2_n1
 SELECT res.key, z.value, res.value
 FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res 
 JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11);
 
-select * from dest_j2;
+select * from dest_j2_n1;
 
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j2
+INSERT OVERWRITE TABLE dest_j2_n1
 SELECT res.key, z.value, res.value
 FROM (select x.key, x.value from src1 x LEFT OUTER JOIN src y ON (x.key = 
y.key)) res 
 JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11);
 
-INSERT OVERWRITE TABLE dest_j2
+INSERT OVERWRITE TABLE dest_j2_n1
 SELECT res.key, z.value, res.value
 FROM (select x.key, x.value from src1 x LEFT OUTER JOIN src y ON (x.key = 
y.key)) res 
 JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11);
 
-select * from dest_j2;
+select * from dest_j2_n1;
 
 EXPLAIN
-INSERT OVERWRITE TABLE dest_j2
+INSERT OVERWRITE TABLE dest_j2_n1
 SELECT res.key, x.value, res.value  
 FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res 
 JOIN srcpart x ON (res.value = x.value and x.ds='2008-04-08' and x.hr=11);
 
-INSERT OVERWRITE TABLE dest_j2
+INSERT OVERWRITE TABLE dest_j2_n1
 SELECT res.key, x.value, res.value  
 FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res 
 JOIN srcpart x ON (res.value = x.value and x.ds='2008-04-08' and x.hr=11);
 
-select * from dest_j2;
+select * from dest_j2_n1;
 
 EXPLAIN
-INSERT OVERWRITE TABLE dest_j2
+INSERT OVERWRITE TABLE dest_j2_n1
 SELECT res.key, y.value, res.value
 FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res 
 JOIN srcpart y ON (res.value = y.value and y.ds='2008-04-08' and y.hr=11);
 
-INSERT OVERWRITE TABLE dest_j2
+INSERT OVERWRITE TABLE dest_j2_n1
 SELECT res.key, y.value, res.value
 FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res 
 JOIN srcpart y ON (res.value = y.value and y.ds='2008-04-08' and y.hr=11);
 
-select * from dest_j2;
+select * from dest_j2_n1;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join33.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join33.q 
b/ql/src/test/queries/clientpositive/join33.q
index 9d3d645..1527575 100644
--- a/ql/src/test/queries/clientpositive/join33.q
+++ b/ql/src/test/queries/clientpositive/join33.q
@@ -4,7 +4,7 @@
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n7(key STRING, value STRING, val2 STRING) STORED AS 
TEXTFILE;
 
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
@@ -13,17 +13,17 @@ set hive.auto.convert.join.noconditionaltask.size=10000;
 -- Since the inputs are small, it should be automatically converted to mapjoin
 
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n7
 SELECT x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key) 
 JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11);
 
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n7
 SELECT x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key) 
 JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11);
 
-select * from dest_j1;
+select * from dest_j1_n7;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join34.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join34.q 
b/ql/src/test/queries/clientpositive/join34.q
index 5077c19..e0234c6 100644
--- a/ql/src/test/queries/clientpositive/join34.q
+++ b/ql/src/test/queries/clientpositive/join34.q
@@ -3,7 +3,7 @@
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n1(key STRING, value STRING, val2 STRING) STORED AS 
TEXTFILE;
 
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
@@ -12,7 +12,7 @@ set hive.auto.convert.join.noconditionaltask.size=10000;
 -- Since the inputs are small, it should be automatically converted to mapjoin
 
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n1
 SELECT x.key, x.value, subq1.value
 FROM 
 ( SELECT x.key as key, x.value as value from src x where x.key < 20
@@ -21,7 +21,7 @@ FROM
 ) subq1
 JOIN src1 x ON (x.key = subq1.key);
 
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n1
 SELECT x.key, x.value, subq1.value
 FROM 
 ( SELECT x.key as key, x.value as value from src x where x.key < 20
@@ -30,7 +30,7 @@ FROM
 ) subq1
 JOIN src1 x ON (x.key = subq1.key);
 
-select * from dest_j1;
+select * from dest_j1_n1;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join35.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join35.q 
b/ql/src/test/queries/clientpositive/join35.q
index be3703e..29b6b00 100644
--- a/ql/src/test/queries/clientpositive/join35.q
+++ b/ql/src/test/queries/clientpositive/join35.q
@@ -3,7 +3,7 @@
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key STRING, value STRING, val2 INT) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n24(key STRING, value STRING, val2 INT) STORED AS 
TEXTFILE;
 
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
@@ -12,7 +12,7 @@ set hive.auto.convert.join.noconditionaltask.size=10000;
 -- Since the inputs are small, it should be automatically converted to mapjoin
 
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n24
 SELECT x.key, x.value, subq1.cnt
 FROM 
 ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by 
x.key
@@ -21,7 +21,7 @@ FROM
 ) subq1
 JOIN src1 x ON (x.key = subq1.key);
 
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n24
 SELECT x.key, x.value, subq1.cnt
 FROM 
 ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by 
x.key
@@ -30,7 +30,7 @@ FROM
 ) subq1
 JOIN src1 x ON (x.key = subq1.key);
 
-select * from dest_j1;
+select * from dest_j1_n24;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join36.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join36.q 
b/ql/src/test/queries/clientpositive/join36.q
index 20005dd..a2aaa50 100644
--- a/ql/src/test/queries/clientpositive/join36.q
+++ b/ql/src/test/queries/clientpositive/join36.q
@@ -1,26 +1,26 @@
 --! qt:dataset:src
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE tmp1(key INT, cnt INT);
-CREATE TABLE tmp2(key INT, cnt INT);
-CREATE TABLE dest_j1(key INT, value INT, val2 INT);
+CREATE TABLE tmp1_n0(key INT, cnt INT);
+CREATE TABLE tmp2_n0(key INT, cnt INT);
+CREATE TABLE dest_j1_n13(key INT, value INT, val2 INT);
 
-INSERT OVERWRITE TABLE tmp1
+INSERT OVERWRITE TABLE tmp1_n0
 SELECT key, count(1) from src group by key;
 
-INSERT OVERWRITE TABLE tmp2
+INSERT OVERWRITE TABLE tmp2_n0
 SELECT key, count(1) from src group by key;
 set hive.auto.convert.join=true;
 EXPLAIN
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n13 
 SELECT /*+ MAPJOIN(x) */ x.key, x.cnt, y.cnt
-FROM tmp1 x JOIN tmp2 y ON (x.key = y.key);
+FROM tmp1_n0 x JOIN tmp2_n0 y ON (x.key = y.key);
 
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n13 
 SELECT /*+ MAPJOIN(x) */ x.key, x.cnt, y.cnt
-FROM tmp1 x JOIN tmp2 y ON (x.key = y.key);
+FROM tmp1_n0 x JOIN tmp2_n0 y ON (x.key = y.key);
 
-select * from dest_j1;
+select * from dest_j1_n13;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join37.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join37.q 
b/ql/src/test/queries/clientpositive/join37.q
index 3a19dd2..ad17c25 100644
--- a/ql/src/test/queries/clientpositive/join37.q
+++ b/ql/src/test/queries/clientpositive/join37.q
@@ -2,19 +2,19 @@
 --! qt:dataset:src
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n9(key INT, value STRING, val2 STRING) STORED AS TEXTFILE;
 
 set hive.auto.convert.join=true;
 EXPLAIN
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n9 
 SELECT /*+ MAPJOIN(X) */ x.key, x.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key);
 
-INSERT OVERWRITE TABLE dest_j1 
+INSERT OVERWRITE TABLE dest_j1_n9 
 SELECT /*+ MAPJOIN(X) */ x.key, x.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key);
 
-select * from dest_j1;
+select * from dest_j1_n9;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join38.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join38.q 
b/ql/src/test/queries/clientpositive/join38.q
index b9f723c..a1dbaa7 100644
--- a/ql/src/test/queries/clientpositive/join38.q
+++ b/ql/src/test/queries/clientpositive/join38.q
@@ -1,20 +1,20 @@
 --! qt:dataset:src
 
 
-create table tmp(col0 string, col1 string,col2 string,col3 string,col4 
string,col5 string,col6 string,col7 string,col8 string,col9 string,col10 
string,col11 string);
+create table tmp_n1(col0 string, col1 string,col2 string,col3 string,col4 
string,col5 string,col6 string,col7 string,col8 string,col9 string,col10 
string,col11 string);
 
-insert overwrite table tmp select key, cast(key + 1 as int), key +2, key+3, 
key+4, cast(key+5 as int), key+6, key+7, key+8, key+9, key+10, cast(key+11 as 
int) from src where key = 100;
+insert overwrite table tmp_n1 select key, cast(key + 1 as int), key +2, key+3, 
key+4, cast(key+5 as int), key+6, key+7, key+8, key+9, key+10, cast(key+11 as 
int) from src where key = 100;
 
-select * from tmp;
+select * from tmp_n1;
 set hive.auto.convert.join=true;
 
 explain
-FROM src a JOIN tmp b ON (a.key = b.col11)
+FROM src a JOIN tmp_n1 b ON (a.key = b.col11)
 SELECT /*+ MAPJOIN(a) */ a.value, b.col5, count(1) as count
 where b.col11 = 111
 group by a.value, b.col5;
 
-FROM src a JOIN tmp b ON (a.key = b.col11)
+FROM src a JOIN tmp_n1 b ON (a.key = b.col11)
 SELECT /*+ MAPJOIN(a) */ a.value, b.col5, count(1) as count
 where b.col11 = 111
 group by a.value, b.col5;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join39.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join39.q 
b/ql/src/test/queries/clientpositive/join39.q
index b0358e9..77832ab 100644
--- a/ql/src/test/queries/clientpositive/join39.q
+++ b/ql/src/test/queries/clientpositive/join39.q
@@ -1,20 +1,20 @@
 --! qt:dataset:src
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key STRING, value STRING, key1 string, val2 STRING) 
STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n8(key STRING, value STRING, key1 string, val2 STRING) 
STORED AS TEXTFILE;
 set hive.auto.convert.join=true;
 
 explain
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n8
 SELECT /*+ MAPJOIN(y) */ x.key, x.value, y.key, y.value
 FROM src x left outer JOIN (select * from src where key <= 100) y ON (x.key = 
y.key);
 
 
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n8
 SELECT /*+ MAPJOIN(y) */ x.key, x.value, y.key, y.value
 FROM src x left outer JOIN (select * from src where key <= 100) y ON (x.key = 
y.key);
 
-select * from dest_j1;
+select * from dest_j1_n8;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/join4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join4.q 
b/ql/src/test/queries/clientpositive/join4.q
index 055ac3d..a501544 100644
--- a/ql/src/test/queries/clientpositive/join4.q
+++ b/ql/src/test/queries/clientpositive/join4.q
@@ -3,7 +3,7 @@
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n72(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS 
TEXTFILE;
 
 EXPLAIN
 FROM (
@@ -18,7 +18,7 @@ FROM (
  ON (a.c1 = b.c3)
  SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4;
+INSERT OVERWRITE TABLE dest1_n72 SELECT c.c1, c.c2, c.c3, c.c4;
 
 FROM (
  FROM 
@@ -32,6 +32,6 @@ FROM (
  ON (a.c1 = b.c3)
  SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4;
+INSERT OVERWRITE TABLE dest1_n72 SELECT c.c1, c.c2, c.c3, c.c4;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n72.* FROM dest1_n72;
