PHOENIX-3290 Move and/or combine as many NeedsOwnCluster tests as possible to
bring down test run time


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2d27179b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2d27179b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2d27179b

Branch: refs/heads/master
Commit: 2d27179b3fcfc7a8d2c8f7bad4a37b3071c81961
Parents: 7601d59
Author: James Taylor <jamestay...@apache.org>
Authored: Tue Sep 20 18:39:05 2016 -0700
Committer: James Taylor <jamestay...@apache.org>
Committed: Tue Sep 20 18:39:05 2016 -0700

----------------------------------------------------------------------
 .../StatisticsCollectionRunTrackerIT.java       |  25 +-
 .../BaseOwnClusterClientManagedTimeIT.java      |  29 --
 .../BaseOwnClusterHBaseManagedTimeIT.java       |  29 --
 .../phoenix/end2end/BaseOwnClusterIT.java       |   7 +
 .../end2end/BaseTenantSpecificTablesIT.java     |  87 +++---
 .../org/apache/phoenix/end2end/BaseViewIT.java  |  81 ++---
 .../end2end/CountDistinctCompressionIT.java     |   2 +-
 .../phoenix/end2end/CsvBulkLoadToolIT.java      |   2 +-
 .../apache/phoenix/end2end/IndexExtendedIT.java |   2 +-
 .../org/apache/phoenix/end2end/KeyOnlyIT.java   | 127 +++-----
 .../phoenix/end2end/MultiCfQueryExecIT.java     | 306 ++++++++-----------
 .../phoenix/end2end/ParallelIteratorsIT.java    | 112 +++----
 .../phoenix/end2end/ParallelStatsEnabledIT.java |  16 +-
 .../apache/phoenix/end2end/QueryTimeoutIT.java  |   2 +-
 .../phoenix/end2end/QueryWithLimitIT.java       |  10 +-
 .../phoenix/end2end/QueryWithOffsetIT.java      |  78 ++---
 .../apache/phoenix/end2end/RenewLeaseIT.java    |   2 +-
 .../phoenix/end2end/SpillableGroupByIT.java     | 127 ++++----
 .../end2end/StatsCollectionDisabledIT.java      |  79 -----
 .../end2end/StatsCollectorAbstractIT.java       |  77 -----
 .../phoenix/end2end/StatsCollectorIT.java       | 161 +++++++++-
 .../StatsCollectorWithSplitsAndMultiCFIT.java   | 186 -----------
 .../end2end/TenantSpecificTablesDDLIT.java      | 272 ++++++++---------
 .../end2end/TenantSpecificTablesDMLIT.java      | 269 ++++++----------
 .../phoenix/end2end/TransactionalViewIT.java    |  48 ++-
 .../org/apache/phoenix/end2end/UpgradeIT.java   | 104 ++++---
 .../phoenix/end2end/UserDefinedFunctionsIT.java |   7 +-
 .../java/org/apache/phoenix/end2end/ViewIT.java | 249 ++++++++-------
 .../index/ImmutableIndexWithStatsIT.java        |  26 +-
 .../end2end/index/MutableIndexFailureIT.java    |   4 +-
 .../end2end/index/ReadOnlyIndexFailureIT.java   |   4 +-
 .../end2end/index/txn/TxWriteFailureIT.java     |   4 +-
 .../apache/phoenix/execute/PartialCommitIT.java |   5 +
 .../RoundRobinResultIteratorWithStatsIT.java    |   4 +-
 .../phoenix/monitoring/PhoenixMetricsIT.java    |   4 +-
 .../apache/phoenix/rpc/PhoenixClientRpcIT.java  |   4 +-
 .../apache/phoenix/rpc/PhoenixServerRpcIT.java  |   4 +-
 .../org/apache/phoenix/util/MetaDataUtil.java   |   4 +
 .../java/org/apache/phoenix/query/BaseTest.java |   4 -
 .../java/org/apache/phoenix/util/TestUtil.java  |  17 +-
 pom.xml                                         |   3 -
 41 files changed, 1105 insertions(+), 1478 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
index bf567f0..bd88922 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
@@ -33,20 +33,26 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.end2end.ParallelStatsEnabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
 
-public class StatisticsCollectionRunTrackerIT extends 
BaseOwnClusterHBaseManagedTimeIT {
+public class StatisticsCollectionRunTrackerIT extends ParallelStatsEnabledIT {
     private static final StatisticsCollectionRunTracker tracker = 
StatisticsCollectionRunTracker
             .getInstance(new Configuration());
+
+    private String fullTableName;
+
     @BeforeClass
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
@@ -56,9 +62,16 @@ public class StatisticsCollectionRunTrackerIT extends 
BaseOwnClusterHBaseManaged
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }
     
+    @Before
+    public void generateTableNames() {
+        String schemaName = TestUtil.DEFAULT_SCHEMA_NAME;
+        String tableName = "T_" + generateRandomString();
+        fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+    }
+
     @Test
     public void testStateBeforeAndAfterUpdateStatsCommand() throws Exception {
-        String tableName = 
"testStateBeforeAndAfterUpdateStatsCommand".toUpperCase();
+        String tableName = fullTableName;
         HRegionInfo regionInfo = createTableAndGetRegion(tableName);
         StatisticsCollectionRunTracker tracker =
                 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
@@ -73,7 +86,7 @@ public class StatisticsCollectionRunTrackerIT extends 
BaseOwnClusterHBaseManaged
     
     @Test
     public void testStateBeforeAndAfterMajorCompaction() throws Exception {
-        String tableName = 
"testStateBeforeAndAfterMajorCompaction".toUpperCase();
+        String tableName = fullTableName;
         HRegionInfo regionInfo = createTableAndGetRegion(tableName);
         StatisticsCollectionRunTracker tracker =
                 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
@@ -101,7 +114,7 @@ public class StatisticsCollectionRunTrackerIT extends 
BaseOwnClusterHBaseManaged
     
     @Test
     public void testMajorCompactionPreventsUpdateStatsFromRunning() throws 
Exception {
-        String tableName = 
"testMajorCompactionPreventsUpdateStatsFromRunning".toUpperCase();
+        String tableName = fullTableName;
         HRegionInfo regionInfo = createTableAndGetRegion(tableName);
         // simulate stats collection via major compaction by marking the 
region as compacting in the tracker
         markRegionAsCompacting(regionInfo);
@@ -114,7 +127,7 @@ public class StatisticsCollectionRunTrackerIT extends 
BaseOwnClusterHBaseManaged
     
     @Test
     public void testUpdateStatsPreventsAnotherUpdateStatsFromRunning() throws 
Exception {
-        String tableName = 
"testUpdateStatsPreventsAnotherUpdateStatsFromRunning".toUpperCase();
+        String tableName = fullTableName;
         HRegionInfo regionInfo = createTableAndGetRegion(tableName);
         markRunningUpdateStats(regionInfo);
         Assert.assertEquals("Row count didn't match", 
CONCURRENT_UPDATE_STATS_ROW_COUNT,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterClientManagedTimeIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterClientManagedTimeIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterClientManagedTimeIT.java
deleted file mode 100644
index 6ece674..0000000
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterClientManagedTimeIT.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicablelaw or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import org.junit.After;
-
-
-public class BaseOwnClusterClientManagedTimeIT extends BaseOwnClusterIT {
-    @After
-    public void cleanUpAfterTest() throws Exception {
-        long ts = nextTimestamp();
-        deletePriorMetaData(ts - 1, getUrl());
-    }    
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterHBaseManagedTimeIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterHBaseManagedTimeIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterHBaseManagedTimeIT.java
deleted file mode 100644
index 63a4300..0000000
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterHBaseManagedTimeIT.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicablelaw or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import org.apache.hadoop.hbase.HConstants;
-import org.junit.After;
-
-
-public class BaseOwnClusterHBaseManagedTimeIT extends BaseOwnClusterIT {
-    @After
-    public void cleanUpAfterTest() throws Exception {
-        deletePriorMetaData(HConstants.LATEST_TIMESTAMP, getUrl());
-    }
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java
index 222efcb..44bd3a1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java
@@ -17,7 +17,9 @@
  */
 package org.apache.phoenix.end2end;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.query.BaseTest;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.experimental.categories.Category;
 
@@ -27,4 +29,9 @@ public class BaseOwnClusterIT extends BaseTest {
     public static void doTeardown() throws Exception {
         tearDownMiniCluster();
     }
+
+    @After
+    public void cleanUpAfterTest() throws Exception {
+        deletePriorMetaData(HConstants.LATEST_TIMESTAMP, getUrl());
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
index 1e87b8f..17918d6 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
@@ -20,64 +20,61 @@ package org.apache.phoenix.end2end;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 
 import java.sql.SQLException;
-import java.util.Map;
 
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.query.BaseTest;
 import org.junit.Before;
-import org.junit.BeforeClass;
 
-import com.google.common.collect.Maps;
 
-
-public abstract class BaseTenantSpecificTablesIT extends 
BaseOwnClusterClientManagedTimeIT {
-    protected static final String TENANT_ID = "ZZTop";
-    protected static final String TENANT_TYPE_ID = "abc";
-    protected static String PHOENIX_JDBC_TENANT_SPECIFIC_URL;
-    protected static final String TENANT_ID2 = "Styx";
-    protected static String PHOENIX_JDBC_TENANT_SPECIFIC_URL2;
+public abstract class BaseTenantSpecificTablesIT extends 
ParallelStatsEnabledIT {
+    protected String TENANT_ID;
+    protected String TENANT_TYPE_ID = "abc";
+    protected String PHOENIX_JDBC_TENANT_SPECIFIC_URL;
+    protected String TENANT_ID2;
+    protected String PHOENIX_JDBC_TENANT_SPECIFIC_URL2;
     
-    protected static final String PARENT_TABLE_NAME = "PARENT_TABLE";
-    protected static final String PARENT_TABLE_DDL = "CREATE TABLE " + 
PARENT_TABLE_NAME + " ( \n" + 
-            "                user VARCHAR ,\n" + 
-            "                tenant_id VARCHAR(5) NOT NULL,\n" + 
-            "                tenant_type_id VARCHAR(3) NOT NULL, \n" + 
-            "                id INTEGER NOT NULL\n" + 
-            "                CONSTRAINT pk PRIMARY KEY (tenant_id, 
tenant_type_id, id)) MULTI_TENANT=true, IMMUTABLE_ROWS=true";
+    protected String PARENT_TABLE_NAME;
+    protected String PARENT_TABLE_DDL;
     
-    protected static final String TENANT_TABLE_NAME = "TENANT_TABLE";
-    protected static final String TENANT_TABLE_DDL = "CREATE VIEW " + 
TENANT_TABLE_NAME + " ( \n" + 
-            "                tenant_col VARCHAR) AS SELECT *\n" + 
-            "                FROM " + PARENT_TABLE_NAME + " WHERE 
tenant_type_id= '" + TENANT_TYPE_ID + "'";
+    protected String TENANT_TABLE_NAME;
+    protected String TENANT_TABLE_DDL;
     
-    protected static final String PARENT_TABLE_NAME_NO_TENANT_TYPE_ID = 
"PARENT_TABLE_NO_TENANT_TYPE_ID";
-    protected static final String PARENT_TABLE_DDL_NO_TENANT_TYPE_ID = "CREATE 
TABLE " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " ( \n" + 
-            "                user VARCHAR ,\n" + 
-            "                tenant_id VARCHAR(5) NOT NULL,\n" + 
-            "                id INTEGER NOT NULL,\n" + 
-            "                CONSTRAINT pk PRIMARY KEY (tenant_id, id)) 
MULTI_TENANT=true, IMMUTABLE_ROWS=true";
+    protected String PARENT_TABLE_NAME_NO_TENANT_TYPE_ID;
+    protected String PARENT_TABLE_DDL_NO_TENANT_TYPE_ID;
     
-    protected static final String TENANT_TABLE_NAME_NO_TENANT_TYPE_ID = 
"TENANT_TABLE_NO_TENANT_TYPE_ID";
-    protected static final String TENANT_TABLE_DDL_NO_TENANT_TYPE_ID = "CREATE 
VIEW " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID + " ( \n" + 
-            "                tenant_col VARCHAR) AS SELECT *\n" + 
-            "                FROM " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID;
+    protected String TENANT_TABLE_NAME_NO_TENANT_TYPE_ID;
+    protected String TENANT_TABLE_DDL_NO_TENANT_TYPE_ID;
     
     
     @Before
     public void createTables() throws SQLException {
-        createTestTable(getUrl(), PARENT_TABLE_DDL, null, nextTimestamp());
-        createTestTable(getUrl(), PARENT_TABLE_DDL_NO_TENANT_TYPE_ID, null, 
nextTimestamp());
-        createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, TENANT_TABLE_DDL, 
null, nextTimestamp());
-        createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, 
TENANT_TABLE_DDL_NO_TENANT_TYPE_ID, null, nextTimestamp());
-    }
-    
-    @BeforeClass
-    public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        // Must update config before starting server
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Long.toString(20));
-        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+        TENANT_ID = "T_" + BaseTest.generateRandomString();
+        TENANT_ID2 = "T_" + BaseTest.generateRandomString();
         PHOENIX_JDBC_TENANT_SPECIFIC_URL = getUrl() + ';' + TENANT_ID_ATTRIB + 
'=' + TENANT_ID;
         PHOENIX_JDBC_TENANT_SPECIFIC_URL2 = getUrl() + ';' + TENANT_ID_ATTRIB 
+ '=' + TENANT_ID2;
+        PARENT_TABLE_NAME = "P_" + BaseTest.generateRandomString();
+        TENANT_TABLE_NAME = "V_" + BaseTest.generateRandomString();
+        PARENT_TABLE_NAME_NO_TENANT_TYPE_ID = "P_" + 
BaseTest.generateRandomString();
+        TENANT_TABLE_NAME_NO_TENANT_TYPE_ID = "V_" + 
BaseTest.generateRandomString();
+        PARENT_TABLE_DDL = "CREATE TABLE " + PARENT_TABLE_NAME + " ( \n" + 
+                "                user VARCHAR ,\n" + 
+                "                tenant_id VARCHAR NOT NULL,\n" + 
+                "                tenant_type_id VARCHAR(3) NOT NULL, \n" + 
+                "                id INTEGER NOT NULL\n" + 
+                "                CONSTRAINT pk PRIMARY KEY (tenant_id, 
tenant_type_id, id)) MULTI_TENANT=true, IMMUTABLE_ROWS=true";
+        TENANT_TABLE_DDL = "CREATE VIEW " + TENANT_TABLE_NAME + " ( \n" + 
+                "                tenant_col VARCHAR) AS SELECT *\n" + 
+                "                FROM " + PARENT_TABLE_NAME + " WHERE 
tenant_type_id= '" + TENANT_TYPE_ID + "'";
+        PARENT_TABLE_DDL_NO_TENANT_TYPE_ID = "CREATE TABLE " + 
PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " ( \n" + 
+                "                user VARCHAR ,\n" + 
+                "                tenant_id VARCHAR NOT NULL,\n" + 
+                "                id INTEGER NOT NULL,\n" + 
+                "                CONSTRAINT pk PRIMARY KEY (tenant_id, id)) 
MULTI_TENANT=true, IMMUTABLE_ROWS=true";
+        TENANT_TABLE_DDL_NO_TENANT_TYPE_ID = "CREATE VIEW " + 
TENANT_TABLE_NAME_NO_TENANT_TYPE_ID + " ( \n" + 
+                "                tenant_col VARCHAR) AS SELECT *\n" + 
+                "                FROM " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID;
+        createTestTable(getUrl(), PARENT_TABLE_DDL);
+        createTestTable(getUrl(), PARENT_TABLE_DDL_NO_TENANT_TYPE_ID);
+        createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, TENANT_TABLE_DDL);
+        createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, 
TENANT_TABLE_DDL_NO_TENANT_TYPE_ID);
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
index a3c36fa..559c000 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
@@ -44,6 +44,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.ScanUtil;
@@ -57,19 +58,20 @@ import org.junit.runners.Parameterized.Parameters;
 import com.google.common.collect.Maps;
 
 @RunWith(Parameterized.class)
-public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
+public abstract class BaseViewIT extends ParallelStatsEnabledIT {
        
        protected String tableName;
+    protected String schemaName;
        protected String fullTableName;
        protected String tableDDLOptions;
-       protected String tableSuffix;
        protected boolean transactional;
 
     @BeforeClass
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Integer.toString(20));
         props.put(QueryServices.TRANSACTIONS_ENABLED, Boolean.toString(true));
+        // TODO: don't repeat this
+        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Long.toString(20));
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }
     
@@ -79,10 +81,10 @@ public abstract class BaseViewIT extends 
BaseOwnClusterHBaseManagedTimeIT {
                if (transactional) {
                        optionBuilder.append(" TRANSACTIONAL=true ");
                }
+               this.schemaName = TestUtil.DEFAULT_SCHEMA_NAME;
                this.tableDDLOptions = optionBuilder.toString();
-               tableSuffix = transactional ?  "_TXN" : "";
-               this.tableName = TestUtil.DEFAULT_DATA_TABLE_NAME + tableSuffix;
-        this.fullTableName = 
SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
+               this.tableName = "T_" + generateRandomString();
+        this.fullTableName = SchemaUtil.getTableName(schemaName, tableName);
        }
     
     @Parameters(name="transactional = {0}")
@@ -92,7 +94,7 @@ public abstract class BaseViewIT extends 
BaseOwnClusterHBaseManagedTimeIT {
     
     protected void testUpdatableViewWithIndex(Integer saltBuckets, boolean 
localIndex) throws Exception {
         String viewName = testUpdatableView(saltBuckets);
-        Pair<String,Scan> pair = testUpdatableViewIndex(saltBuckets, 
localIndex);
+        Pair<String,Scan> pair = testUpdatableViewIndex(saltBuckets, 
localIndex, viewName);
         Scan scan = pair.getSecond();
         String tableName = pair.getFirst();
         // Confirm that dropping the view also deletes the rows in the index
@@ -124,7 +126,7 @@ public abstract class BaseViewIT extends 
BaseOwnClusterHBaseManagedTimeIT {
                                tableDDLOptions+=",";
                        tableDDLOptions+=(" SALT_BUCKETS="+saltBuckets);
                }
-               String viewName = "V";
+               String viewName = "V_" + generateRandomString();
         String ddl = "CREATE TABLE " + fullTableName + " (k1 INTEGER NOT NULL, 
k2 INTEGER NOT NULL, k3 DECIMAL, s VARCHAR CONSTRAINT pk PRIMARY KEY (k1, k2, 
k3))" + tableDDLOptions;
         conn.createStatement().execute(ddl);
         ddl = "CREATE VIEW " + viewName + " AS SELECT * FROM " + fullTableName 
+ " WHERE k1 = 1";
@@ -138,10 +140,10 @@ public abstract class BaseViewIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         rs = conn.createStatement().executeQuery("SELECT count(*) FROM " + 
fullTableName);
         assertTrue(rs.next());
         assertEquals(10, rs.getInt(1));
-        rs = conn.createStatement().executeQuery("SELECT count(*) FROM v");
+        rs = conn.createStatement().executeQuery("SELECT count(*) FROM " + 
viewName);
         assertTrue(rs.next());
         assertEquals(3, rs.getInt(1));
-        rs = conn.createStatement().executeQuery("SELECT k1, k2, k3 FROM v");
+        rs = conn.createStatement().executeQuery("SELECT k1, k2, k3 FROM " + 
viewName);
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
         assertEquals(101, rs.getInt(2));
@@ -156,10 +158,10 @@ public abstract class BaseViewIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         assertEquals(2, rs.getInt(3));
         assertFalse(rs.next());
 
-        conn.createStatement().execute("UPSERT INTO v(k2,S,k3) 
VALUES(120,'foo',50.0)");
-        conn.createStatement().execute("UPSERT INTO v(k2,S,k3) 
VALUES(121,'bar',51.0)");
+        conn.createStatement().execute("UPSERT INTO " + viewName + "(k2,S,k3) 
VALUES(120,'foo',50.0)");
+        conn.createStatement().execute("UPSERT INTO " + viewName + "(k2,S,k3) 
VALUES(121,'bar',51.0)");
         conn.commit();
-        rs = conn.createStatement().executeQuery("SELECT k1, k2 FROM v WHERE 
k2 >= 120");
+        rs = conn.createStatement().executeQuery("SELECT k1, k2 FROM " + 
viewName + " WHERE k2 >= 120");
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
         assertEquals(120, rs.getInt(2));
@@ -171,27 +173,29 @@ public abstract class BaseViewIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         return viewName;
     }
 
-    protected Pair<String,Scan> testUpdatableViewIndex(Integer saltBuckets) 
throws Exception {
-        return testUpdatableViewIndex(saltBuckets, false);
+    protected Pair<String,Scan> testUpdatableViewIndex(Integer saltBuckets, 
String viewName) throws Exception {
+        return testUpdatableViewIndex(saltBuckets, false, viewName);
     }
 
-    protected Pair<String,Scan> testUpdatableViewIndex(Integer saltBuckets, 
boolean localIndex) throws Exception {
+    protected Pair<String,Scan> testUpdatableViewIndex(Integer saltBuckets, 
boolean localIndex, String viewName) throws Exception {
         ResultSet rs;
         Connection conn = DriverManager.getConnection(getUrl());
+        String viewIndexName1 = "I_" + generateRandomString();
+        String viewIndexPhysicalName = 
MetaDataUtil.getViewIndexName(schemaName, tableName);
         if (localIndex) {
-            conn.createStatement().execute("CREATE LOCAL INDEX i1 on v(k3)");
+            conn.createStatement().execute("CREATE LOCAL INDEX " + 
viewIndexName1 + " on " + viewName + "(k3)");
         } else {
-            conn.createStatement().execute("CREATE INDEX i1 on v(k3) include 
(s)");
+            conn.createStatement().execute("CREATE INDEX " + viewIndexName1 + 
" on " + viewName + "(k3) include (s)");
         }
-        conn.createStatement().execute("UPSERT INTO v(k2,S,k3) 
VALUES(120,'foo',50.0)");
+        conn.createStatement().execute("UPSERT INTO " + viewName + "(k2,S,k3) 
VALUES(120,'foo',50.0)");
         conn.commit();
 
-        analyzeTable(conn, "v");        
-        List<KeyRange> splits = getAllSplits(conn, "i1");
+        analyzeTable(conn, viewName);        
+        List<KeyRange> splits = getAllSplits(conn, viewIndexName1);
         // More guideposts with salted, since it's already pre-split at salt 
buckets
         assertEquals(saltBuckets == null ? 6 : 8, splits.size());
         
-        String query = "SELECT k1, k2, k3, s FROM v WHERE k3 = 51.0";
+        String query = "SELECT k1, k2, k3, s FROM " + viewName + " WHERE k3 = 
51.0";
         rs = conn.createStatement().executeQuery(query);
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
@@ -202,34 +206,35 @@ public abstract class BaseViewIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         String queryPlan = QueryUtil.getExplainPlan(rs);
         if (localIndex) {
-            assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : 
saltBuckets)  +"-WAY RANGE SCAN OVER " + tableName +" [1,51]\n"
+            assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : 
saltBuckets)  +"-WAY RANGE SCAN OVER " + fullTableName +" [1,51]\n"
                     + "    SERVER FILTER BY FIRST KEY ONLY\n"
                     + "CLIENT MERGE SORT",
                 queryPlan);
         } else {
             assertEquals(saltBuckets == null
-                    ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + 
tableName +" [" + Short.MIN_VALUE + ",51]"
-                            : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE 
SCAN OVER _IDX_T" + (transactional ? "_TXN" : "") + " [0," + Short.MIN_VALUE + 
",51] - ["+(saltBuckets.intValue()-1)+"," + Short.MIN_VALUE + ",51]\nCLIENT 
MERGE SORT",
+                    ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + 
viewIndexPhysicalName +" [" + Short.MIN_VALUE + ",51]"
+                            : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE 
SCAN OVER " + viewIndexPhysicalName + " [0," + Short.MIN_VALUE + ",51] - 
["+(saltBuckets.intValue()-1)+"," + Short.MIN_VALUE + ",51]\nCLIENT MERGE SORT",
                             queryPlan);
         }
 
+        String viewIndexName2 = "I_" + generateRandomString();
         if (localIndex) {
-            conn.createStatement().execute("CREATE LOCAL INDEX i2 on v(s)");
+            conn.createStatement().execute("CREATE LOCAL INDEX " + 
viewIndexName2 + " on " + viewName + "(s)");
         } else {
-            conn.createStatement().execute("CREATE INDEX i2 on v(s)");
+            conn.createStatement().execute("CREATE INDEX " + viewIndexName2 + 
" on " + viewName + "(s)");
         }
         
         // new index hasn't been analyzed yet
-        splits = getAllSplits(conn, "i2");
+        splits = getAllSplits(conn, viewIndexName2);
         assertEquals(saltBuckets == null ? 1 : 3, splits.size());
         
         // analyze table should analyze all view data
-        analyzeTable(conn, tableName);        
-        splits = getAllSplits(conn, "i2");
+        analyzeTable(conn, fullTableName);        
+        splits = getAllSplits(conn, viewIndexName2);
         assertEquals(saltBuckets == null ? 6 : 8, splits.size());
 
         
-        query = "SELECT k1, k2, s FROM v WHERE s = 'foo'";
+        query = "SELECT k1, k2, s FROM " + viewName + " WHERE s = 'foo'";
         Statement statement = conn.createStatement();
         rs = statement.executeQuery(query);
         Scan scan = 
statement.unwrap(PhoenixStatement.class).getQueryPlan().getContext().getScan();
@@ -238,24 +243,24 @@ public abstract class BaseViewIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         assertEquals(120, rs.getInt(2));
         assertEquals("foo", rs.getString(3));
         assertFalse(rs.next());
-        String htableName;
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+        String physicalTableName;
         if (localIndex) {
-            htableName = tableName;
-            assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : 
saltBuckets)  +"-WAY RANGE SCAN OVER " + htableName +" [" + (2) + ",'foo']\n"
+            physicalTableName = tableName;
+            assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : 
saltBuckets)  +"-WAY RANGE SCAN OVER " + fullTableName +" [" + (2) + ",'foo']\n"
                     + "    SERVER FILTER BY FIRST KEY ONLY\n"
                     + "CLIENT MERGE SORT",QueryUtil.getExplainPlan(rs));
         } else {
-            htableName = "_IDX_" + tableName;
+            physicalTableName = viewIndexPhysicalName;
             assertEquals(saltBuckets == null
-                    ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + htableName +" 
[" + (Short.MIN_VALUE+1) + ",'foo']\n"
+                    ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + 
viewIndexPhysicalName +" [" + (Short.MIN_VALUE+1) + ",'foo']\n"
                             + "    SERVER FILTER BY FIRST KEY ONLY"
-                            : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE 
SCAN OVER " + htableName + " [0," + (Short.MIN_VALUE+1) + ",'foo'] - 
["+(saltBuckets.intValue()-1)+"," + (Short.MIN_VALUE+1) + ",'foo']\n"
+                            : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE 
SCAN OVER " + viewIndexPhysicalName + " [0," + (Short.MIN_VALUE+1) + ",'foo'] - 
["+(saltBuckets.intValue()-1)+"," + (Short.MIN_VALUE+1) + ",'foo']\n"
                                     + "    SERVER FILTER BY FIRST KEY ONLY\n"
                                     + "CLIENT MERGE SORT",
                             QueryUtil.getExplainPlan(rs));
         }
         conn.close();
-        return new Pair<>(htableName,scan);
+        return new Pair<>(physicalTableName,scan);
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctCompressionIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctCompressionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctCompressionIT.java
index 677d76f..aa7a89c 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctCompressionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctCompressionIT.java
@@ -40,7 +40,7 @@ import org.junit.Test;
 import com.google.common.collect.Maps;
 
 
-public class CountDistinctCompressionIT extends 
BaseOwnClusterHBaseManagedTimeIT {
+public class CountDistinctCompressionIT extends BaseOwnClusterIT {
     @BeforeClass
     public static void doSetup() throws Exception {
         Map<String, String> props = Maps.newHashMapWithExpectedSize(3);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
index 4971fc3..6aaaeb1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
@@ -48,7 +48,7 @@ import org.junit.Test;
 
 import com.google.common.collect.Maps;
 
-public class CsvBulkLoadToolIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class CsvBulkLoadToolIT extends BaseOwnClusterIT {
 
     private static Connection conn;
     private static String zkQuorum;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
index b23e342..8314850 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
@@ -64,7 +64,7 @@ import com.google.common.collect.Maps;
  * Tests for the {@link IndexTool}
  */
 @RunWith(Parameterized.class)
-public class IndexExtendedIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class IndexExtendedIT extends BaseOwnClusterIT {
     private final boolean localIndex;
     private final boolean transactional;
     private final boolean directApi;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
index 50e0709..7ec37ce 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
@@ -17,7 +17,6 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.apache.phoenix.util.TestUtil.KEYONLY_NAME;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.apache.phoenix.util.TestUtil.analyzeTable;
 import static org.apache.phoenix.util.TestUtil.getAllSplits;
@@ -29,48 +28,41 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.util.List;
-import java.util.Map;
 import java.util.Properties;
 
 import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.TestUtil;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 
-import com.google.common.collect.Maps;
 
-
-public class KeyOnlyIT extends BaseOwnClusterClientManagedTimeIT {
+public class KeyOnlyIT extends ParallelStatsEnabledIT {
+    private String tableName;
     
-    @BeforeClass
-    public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        // Must update config before starting server
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Long.toString(20));
-        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+    @Before
+    public void createTable() throws SQLException {
+        tableName = generateRandomString();
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+            conn.createStatement().execute("create table " + tableName +
+                "   (i1 integer not null, i2 integer not null\n" +
+                "    CONSTRAINT pk PRIMARY KEY (i1,i2))");
+        }
     }
     
     @Test
     public void testKeyOnly() throws Exception {
-        long ts = nextTimestamp();
-        ensureTableCreated(getUrl(),KEYONLY_NAME,KEYONLY_NAME,null, ts);
-        initTableValues(ts+1);
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts+30));
-        Connection conn3 = DriverManager.getConnection(getUrl(), props);
-        analyzeTable(conn3, KEYONLY_NAME);
-        conn3.close();
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        initTableValues(conn);
+        analyzeTable(conn, tableName);
         
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts+50));
-        Connection conn5 = DriverManager.getConnection(getUrl(), props);
-        String query = "SELECT i1, i2 FROM KEYONLY";
-        PreparedStatement statement = conn5.prepareStatement(query);
+        String query = "SELECT i1, i2 FROM " + tableName;
+        PreparedStatement statement = conn.prepareStatement(query);
         ResultSet rs = statement.executeQuery();
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
@@ -79,36 +71,24 @@ public class KeyOnlyIT extends 
BaseOwnClusterClientManagedTimeIT {
         assertEquals(3, rs.getInt(1));
         assertEquals(4, rs.getInt(2));
         assertFalse(rs.next());
-        List<KeyRange> splits = getAllSplits(conn5, "KEYONLY");
+        List<KeyRange> splits = getAllSplits(conn, tableName);
         assertEquals(3, splits.size());
-        conn5.close();
         
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts+60));
-        Connection conn6 = DriverManager.getConnection(getUrl(), props);
-        conn6.createStatement().execute("ALTER TABLE KEYONLY ADD s1 varchar");
-        conn6.close();
+        conn.createStatement().execute("ALTER TABLE " + tableName + " ADD s1 
varchar");
         
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts+70));
-        Connection conn7 = DriverManager.getConnection(getUrl(), props);
-        PreparedStatement stmt = conn7.prepareStatement(
+        PreparedStatement stmt = conn.prepareStatement(
                 "upsert into " +
-                "KEYONLY VALUES (?, ?, ?)");
+                tableName + " VALUES (?, ?, ?)");
         stmt.setInt(1, 5);
         stmt.setInt(2, 6);
         stmt.setString(3, "foo");
         stmt.execute();
-        conn7.commit();
-        conn7.close();
+        conn.commit();
         
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts+80));
-        Connection conn8 = DriverManager.getConnection(getUrl(), props);
-        analyzeTable(conn8, KEYONLY_NAME);
-        conn8.close();
+        analyzeTable(conn, tableName);
 
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts+90));
-        Connection conn9 = DriverManager.getConnection(getUrl(), props);
-        query = "SELECT i1 FROM KEYONLY";
-        statement = conn9.prepareStatement(query);
+        query = "SELECT i1 FROM " + tableName;
+        statement = conn.prepareStatement(query);
         rs = statement.executeQuery();
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
@@ -118,8 +98,8 @@ public class KeyOnlyIT extends 
BaseOwnClusterClientManagedTimeIT {
         assertEquals(5, rs.getInt(1));
         assertFalse(rs.next());
         
-        query = "SELECT i1,s1 FROM KEYONLY";
-        statement = conn9.prepareStatement(query);
+        query = "SELECT i1,s1 FROM " + tableName;
+        statement = conn.prepareStatement(query);
         rs = statement.executeQuery();
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
@@ -131,67 +111,49 @@ public class KeyOnlyIT extends 
BaseOwnClusterClientManagedTimeIT {
         assertEquals(5, rs.getInt(1));
         assertEquals("foo", rs.getString(2));
         assertFalse(rs.next());
-
-        conn9.close();
     }
     
     @Test
     public void testOr() throws Exception {
-        long ts = nextTimestamp();
-        ensureTableCreated(getUrl(),KEYONLY_NAME,KEYONLY_NAME,null, ts);
-        initTableValues(ts+1);
         Properties props = new Properties();
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        initTableValues(conn);
+        analyzeTable(conn, tableName);
         
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts+3));
-        Connection conn3 = DriverManager.getConnection(getUrl(), props);
-        analyzeTable(conn3, KEYONLY_NAME);
-        conn3.close();
-        
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts+5));
-        Connection conn5 = DriverManager.getConnection(getUrl(), props);
-        String query = "SELECT i1 FROM KEYONLY WHERE i1 < 2 or i1 = 3";
-        PreparedStatement statement = conn5.prepareStatement(query);
+        String query = "SELECT i1 FROM " + tableName + " WHERE i1 < 2 or i1 = 
3";
+        PreparedStatement statement = conn.prepareStatement(query);
         ResultSet rs = statement.executeQuery();
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
         assertTrue(rs.next());
         assertEquals(3, rs.getInt(1));
         assertFalse(rs.next());
-        conn5.close();
     }
         
     @Test
     public void testQueryWithLimitAndStats() throws Exception {
-        long ts = nextTimestamp();
-        ensureTableCreated(getUrl(),KEYONLY_NAME,KEYONLY_NAME,null, ts);
-        initTableValues(ts+1, 100);
-        
-        TestUtil.analyzeTable(getUrl(), ts+10, KEYONLY_NAME);
         Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);        
+        initTableValues(conn, 100);
+        analyzeTable(conn, tableName);
         
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts+50));
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        String query = "SELECT i1 FROM KEYONLY LIMIT 1";
+        String query = "SELECT i1 FROM " + tableName + " LIMIT 1";
         ResultSet rs = conn.createStatement().executeQuery(query);
         assertTrue(rs.next());
         assertEquals(0, rs.getInt(1));
         assertFalse(rs.next());
         
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-        assertEquals("CLIENT SERIAL 1-WAY FULL SCAN OVER KEYONLY\n" + 
+        assertEquals("CLIENT SERIAL 1-WAY FULL SCAN OVER " + tableName + "\n" 
+ 
                 "    SERVER FILTER BY FIRST KEY ONLY\n" + 
                 "    SERVER 1 ROW LIMIT\n" + 
                 "CLIENT 1 ROW LIMIT", QueryUtil.getExplainPlan(rs));
-        conn.close();
     }
     
-    protected static void initTableValues(long ts) throws Exception {
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts;
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(url, props);
+    private void initTableValues(Connection conn) throws Exception {
         PreparedStatement stmt = conn.prepareStatement(
             "upsert into " +
-            "KEYONLY VALUES (?, ?)");
+            tableName + " VALUES (?, ?)");
         stmt.setInt(1, 1);
         stmt.setInt(2, 2);
         stmt.execute();
@@ -201,16 +163,12 @@ public class KeyOnlyIT extends 
BaseOwnClusterClientManagedTimeIT {
         stmt.execute();
         
         conn.commit();
-        conn.close();
     }
 
-    protected static void initTableValues(long ts, int nRows) throws Exception 
{
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts;
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(url, props);
+    private void initTableValues(Connection conn, int nRows) throws Exception {
         PreparedStatement stmt = conn.prepareStatement(
             "upsert into " +
-            "KEYONLY VALUES (?, ?)");
+             tableName + " VALUES (?, ?)");
         for (int i = 0; i < nRows; i++) {
                stmt.setInt(1, i);
                stmt.setInt(2, i+1);
@@ -218,6 +176,5 @@ public class KeyOnlyIT extends 
BaseOwnClusterClientManagedTimeIT {
         }
         
         conn.commit();
-        conn.close();
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
index b9d27ca..4a0bb01 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
@@ -29,50 +29,44 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.util.List;
-import java.util.Map;
 import java.util.Properties;
 
 import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.BeforeClass;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Before;
 import org.junit.Test;
 
-import com.google.common.collect.Maps;
 
+public class MultiCfQueryExecIT extends ParallelStatsEnabledIT {
+    private String fullTableName;
 
-public class MultiCfQueryExecIT extends BaseOwnClusterClientManagedTimeIT {
-    private static final String MULTI_CF = "MULTI_CF";
-    
-    @BeforeClass
-    public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        // Must update config before starting server
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Long.toString(20));
-        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+    @Before
+    public void generateTableNames() throws SQLException {
+        String schemaName = TestUtil.DEFAULT_SCHEMA_NAME;
+        String tableName = "T_" + generateRandomString();
+        fullTableName = SchemaUtil.getTableName(schemaName, tableName);
     }
-    
-    protected static void initTableValues(long ts) throws Exception {
-        ensureTableCreated(getUrl(),MULTI_CF,MULTI_CF,null, ts-2);
-        
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts;
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(url, props);
-        conn.setAutoCommit(true);
+
+    private void createTable(Connection conn) throws SQLException {
+        conn.createStatement().execute(
+                "create table " + fullTableName + "   (id char(15) not null 
primary key,\n"
+                        + "    a.unique_user_count integer,\n" + "    
b.unique_org_count integer,\n"
+                        + "    c.db_cpu_utilization decimal(31,10),\n" + "    
d.transaction_count bigint,\n"
+                        + "    e.cpu_utilization decimal(31,10),\n" + "    
f.response_time bigint,\n"
+                        + "    g.response_time bigint)");
+    }
+
+    private void initTableValues(Connection conn) throws Exception {
         // Insert all rows at ts
         PreparedStatement stmt = conn.prepareStatement(
-                "upsert into " +
-                "MULTI_CF(" +
-                "    ID, " +
-                "    TRANSACTION_COUNT, " +
-                "    CPU_UTILIZATION, " +
-                "    DB_CPU_UTILIZATION," +
-                "    UNIQUE_USER_COUNT," +
-                "    F.RESPONSE_TIME," +
-                "    G.RESPONSE_TIME)" +
+"upsert into " + fullTableName + "(" + "    ID, "
+                + "    TRANSACTION_COUNT, " + "    CPU_UTILIZATION, " + "    
DB_CPU_UTILIZATION,"
+                + "    UNIQUE_USER_COUNT," + "    F.RESPONSE_TIME," + "    
G.RESPONSE_TIME)"
+                +
                 "VALUES (?, ?, ?, ?, ?, ?, ?)");
         stmt.setString(1, "000000000000001");
         stmt.setInt(2, 100);
@@ -90,18 +84,18 @@ public class MultiCfQueryExecIT extends 
BaseOwnClusterClientManagedTimeIT {
         stmt.setLong(6, 2222);
         stmt.setLong(7, 22222);
         stmt.execute();
+        conn.commit();
     }
 
     @Test
     public void testConstantCount() throws Exception {
-        long ts = nextTimestamp();
-        String query = "SELECT count(1) from multi_cf";
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ (ts + 5); // Run query at timestamp 5
+        String query = "SELECT count(1) from " + fullTableName;
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);
         try {
-            initTableValues(ts);
-            analyzeTable(getUrl(), ts + 3, "MULTI_CF");
+            createTable(conn);
+            initTableValues(conn);
+            analyzeTable(conn, fullTableName);
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -111,17 +105,16 @@ public class MultiCfQueryExecIT extends 
BaseOwnClusterClientManagedTimeIT {
             conn.close();
         }
     }
-    
+
     @Test
     public void testCFToDisambiguateInSelectOnly1() throws Exception {
-        long ts = nextTimestamp();
-        String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from multi_cf 
where ID = '000000000000002'";
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ (ts + 5); // Run query at timestamp 5
+        String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from " + 
fullTableName + " where ID = '000000000000002'";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);
         try {
-            initTableValues(ts);
-            analyzeTable(getUrl(), ts + 3, "MULTI_CF");
+            createTable(conn);
+            initTableValues(conn);
+            analyzeTable(conn, fullTableName);
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -132,17 +125,16 @@ public class MultiCfQueryExecIT extends 
BaseOwnClusterClientManagedTimeIT {
             conn.close();
         }
     }
-    
+
     @Test
     public void testCFToDisambiguateInSelectOnly2() throws Exception {
-        long ts = nextTimestamp();
-        String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from multi_cf 
where TRANSACTION_COUNT = 200";
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ (ts + 5); // Run query at timestamp 5
+        String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from " + 
fullTableName + " where TRANSACTION_COUNT = 200";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);
         try {
-            initTableValues(ts);
-            analyzeTable(getUrl(), ts + 3, "MULTI_CF");
+            createTable(conn);
+            initTableValues(conn);
+            analyzeTable(conn, fullTableName);
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -153,17 +145,16 @@ public class MultiCfQueryExecIT extends 
BaseOwnClusterClientManagedTimeIT {
             conn.close();
         }
     }
-    
+
     @Test
     public void testGuidePostsForMultiCFs() throws Exception {
-        long ts = nextTimestamp();
-        initTableValues(ts);
-        String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from multi_cf 
where F.RESPONSE_TIME = 2222";
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ (ts + 5); // Run query at timestamp 5
+        String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from " + 
fullTableName + " where F.RESPONSE_TIME = 2222";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);
         try {
-            analyzeTable(getUrl(), ts + 3, "MULTI_CF");
+            createTable(conn);
+            initTableValues(conn);
+            analyzeTable(conn, fullTableName);
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -172,11 +163,11 @@ public class MultiCfQueryExecIT extends 
BaseOwnClusterClientManagedTimeIT {
             assertFalse(rs.next());
             // Use E column family. Since the column family with the empty key 
value (the first one, A)
             // is always added to the scan, we never really use other 
guideposts (but this may change).
-            List<KeyRange> splits = getAllSplits(conn, "MULTI_CF", 
"e.cpu_utilization IS NOT NULL", "COUNT(*)");
+            List<KeyRange> splits = getAllSplits(conn, fullTableName, 
"e.cpu_utilization IS NOT NULL", "COUNT(*)");
             // Since the E column family is not populated, it won't have as 
many splits
             assertEquals(3, splits.size());
             // Same as above for G column family.
-            splits = getAllSplits(conn, "MULTI_CF", "g.response_time IS NOT 
NULL", "COUNT(*)");
+            splits = getAllSplits(conn, fullTableName, "g.response_time IS NOT 
NULL", "COUNT(*)");
             assertEquals(3, splits.size());
         } finally {
             conn.close();
@@ -185,48 +176,38 @@ public class MultiCfQueryExecIT extends 
BaseOwnClusterClientManagedTimeIT {
 
     @Test
     public void testGuidePostsForMultiCFsOverUnevenDistrib() throws Exception {
-        long ts = nextTimestamp();
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 10));
         Connection conn = DriverManager.getConnection(getUrl(), props);
-        
-        conn.createStatement().execute("CREATE TABLE T_6CF (K1 CHAR(1) NOT 
NULL, "
-                + "K2 VARCHAR NOT NULL, "
-                + "CF1.A INTEGER, "
-                + "CF2.B INTEGER, "
-                + "CF3.C INTEGER, "
-                + "CF4.D INTEGER, "
-                + "CF5.E INTEGER, "
-                + "CF6.F INTEGER "
-                + "CONSTRAINT PK PRIMARY KEY (K1,K2)) SPLIT ON ('B','C','D')");
 
-        conn.close();
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 20));
-        conn = DriverManager.getConnection(getUrl(), props);
+        conn.createStatement().execute(
+                "CREATE TABLE " + fullTableName + " (K1 CHAR(1) NOT NULL, "
+ + "K2 VARCHAR NOT NULL, " + "CF1.A INTEGER, "
+                        + "CF2.B INTEGER, " + "CF3.C INTEGER, " + "CF4.D 
INTEGER, " + "CF5.E INTEGER, "
+                        + "CF6.F INTEGER " + "CONSTRAINT PK PRIMARY KEY 
(K1,K2)) SPLIT ON ('B','C','D')");
+
         for (int i = 0; i < 100; i++) {
-            String upsert = "UPSERT INTO T_6CF(K1,K2,A) VALUES('" + 
Character.toString((char)('A'+i%10)) + "','" + (i*10) + "'," + i + ")";
+            String upsert = "UPSERT INTO " + fullTableName + "(K1,K2,A) 
VALUES('" + Character.toString((char)('A' + i % 10))
+                    + "','" + (i * 10) + "'," + i + ")";
             conn.createStatement().execute(upsert);
             if (i % 10 == 0) {
-                conn.createStatement().execute("UPSERT INTO T_6CF(K1,K2,F) 
VALUES('" + Character.toString((char)('A'+i%10)) + "','" + (i*10) + "'," + (i * 
10) + ")");
+                conn.createStatement().execute(
+                        "UPSERT INTO " + fullTableName + "(K1,K2,F) VALUES('" 
+ Character.toString((char)('A' + i % 10))
+                                + "','" + (i * 10) + "'," + (i * 10) + ")");
             }
         }
         conn.commit();
-        conn.close();
-        
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 40));
-        conn = DriverManager.getConnection(getUrl(), props);
         try {
-            analyzeTable(getUrl(), ts + 30, "T_6CF");
-            PreparedStatement statement = conn.prepareStatement("select 
count(*) from T_6CF where f < 400");
+            analyzeTable(conn, fullTableName);
+            PreparedStatement statement = conn.prepareStatement("select 
count(*) from " + fullTableName + " where f < 400");
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
             assertEquals(4, rs.getLong(1));
             assertFalse(rs.next());
-            List<KeyRange> splits = getAllSplits(conn, "T_6CF", "f < 400", 
"COUNT(*)");
+            List<KeyRange> splits = getAllSplits(conn, fullTableName, "f < 
400", "COUNT(*)");
             // Uses less populated column f
             assertEquals(14, splits.size());
             // Uses more populated column a
-            splits = getAllSplits(conn, "T_6CF", "a < 80", "COUNT(*)");
+            splits = getAllSplits(conn, fullTableName, "a < 80", "COUNT(*)");
             assertEquals(104, splits.size());
         } finally {
             conn.close();
@@ -235,77 +216,67 @@ public class MultiCfQueryExecIT extends 
BaseOwnClusterClientManagedTimeIT {
 
     @Test
     public void testGuidePostsRetrievedForMultiCF() throws Exception {
-      Connection conn;
-      PreparedStatement stmt;
-      ResultSet rs;
+        Connection conn;
+        PreparedStatement stmt;
+        ResultSet rs;
 
-      long ts = nextTimestamp();
-      Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-      props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 
10));
-      conn = DriverManager.getConnection(getUrl(), props);
-      conn.createStatement()
-              .execute(
-                      "CREATE TABLE T (  k INTEGER PRIMARY KEY, A.V1 VARCHAR, 
B.V2 VARCHAR, C.V3 VARCHAR)");
-      conn.close();
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        conn = DriverManager.getConnection(getUrl(), props);
+        conn.createStatement().execute(
+                "CREATE TABLE " + fullTableName + " (  k INTEGER PRIMARY KEY, 
A.V1 VARCHAR, B.V2 VARCHAR, C.V3 VARCHAR)");
 
-      props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 
30));
-      conn = DriverManager.getConnection(getUrl(), props);
-      stmt = conn.prepareStatement("UPSERT INTO T VALUES(?,?,?,?)");
-      stmt.setInt(1, 1);
-      stmt.setString(2, "A");
-      stmt.setString(3, "B");
-      stmt.setString(4, "C");
-      stmt.execute();
-      conn.commit();
-      
-      stmt = conn.prepareStatement("UPSERT INTO T VALUES(?,?,?,?)");
-      stmt.setInt(1, 2);
-      stmt.setString(2, "D");
-      stmt.setString(3, "E");
-      stmt.setString(4, "F");
-      stmt.execute();
-      conn.commit();
-      
-      stmt = conn.prepareStatement("UPSERT INTO T(k, A.V1, C.V3) 
VALUES(?,?,?)");
-      stmt.setInt(1, 3);
-      stmt.setString(2, "E");
-      stmt.setString(3, "X");
-      stmt.execute();
-      conn.commit();
-      
-      stmt = conn.prepareStatement("UPSERT INTO T(k, A.V1, C.V3) 
VALUES(?,?,?)");
-      stmt.setInt(1, 4);
-      stmt.setString(2, "F");
-      stmt.setString(3, "F");
-      stmt.execute();
-      conn.commit();
- 
-      conn.close();
- 
-      analyzeTable(getUrl(), ts + 50, "T");
+        stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " 
VALUES(?,?,?,?)");
+        stmt.setInt(1, 1);
+        stmt.setString(2, "A");
+        stmt.setString(3, "B");
+        stmt.setString(4, "C");
+        stmt.execute();
+        conn.commit();
 
-      props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 
60));
-      conn = DriverManager.getConnection(getUrl(), props);
-      rs = conn.createStatement().executeQuery("SELECT B.V2 FROM T WHERE B.V2 
= 'B'");
-      assertTrue(rs.next());
-      assertEquals("B",rs.getString(1));
-      List<KeyRange> splits = getAllSplits(conn, "T", "C.V3 = 'X'", "A.V1");
-      assertEquals(5, splits.size());
-      splits = getAllSplits(conn, "T", "B.V2 = 'B'", "B.V2");
-      assertEquals(3, splits.size());
-      conn.close();
+        stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " 
VALUES(?,?,?,?)");
+        stmt.setInt(1, 2);
+        stmt.setString(2, "D");
+        stmt.setString(3, "E");
+        stmt.setString(4, "F");
+        stmt.execute();
+        conn.commit();
+
+        stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + "(k, 
A.V1, C.V3) VALUES(?,?,?)");
+        stmt.setInt(1, 3);
+        stmt.setString(2, "E");
+        stmt.setString(3, "X");
+        stmt.execute();
+        conn.commit();
+
+        stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + "(k, 
A.V1, C.V3) VALUES(?,?,?)");
+        stmt.setInt(1, 4);
+        stmt.setString(2, "F");
+        stmt.setString(3, "F");
+        stmt.execute();
+        conn.commit();
+
+        analyzeTable(conn, fullTableName);
+
+        rs = conn.createStatement().executeQuery("SELECT B.V2 FROM " + 
fullTableName + " WHERE B.V2 = 'B'");
+        assertTrue(rs.next());
+        assertEquals("B", rs.getString(1));
+        List<KeyRange> splits = getAllSplits(conn, fullTableName, "C.V3 = 
'X'", "A.V1");
+        assertEquals(5, splits.size());
+        splits = getAllSplits(conn, fullTableName, "B.V2 = 'B'", "B.V2");
+        assertEquals(3, splits.size());
+        conn.close();
     }
 
     @Test
     public void testCFToDisambiguate2() throws Exception {
-        long ts = nextTimestamp();
-        String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from multi_cf 
where G.RESPONSE_TIME-1 = F.RESPONSE_TIME";
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ (ts + 5); // Run query at timestamp 5
+        String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from " + 
fullTableName
+                + " where G.RESPONSE_TIME-1 = F.RESPONSE_TIME";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);
         try {
-            initTableValues(ts);
-            analyzeTable(getUrl(), ts + 3, "MULTI_CF");
+            createTable(conn);
+            initTableValues(conn);
+            analyzeTable(conn, fullTableName);
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -316,35 +287,25 @@ public class MultiCfQueryExecIT extends 
BaseOwnClusterClientManagedTimeIT {
             conn.close();
         }
     }
-    
+
     @Test
     public void testDefaultCFToDisambiguate() throws Exception {
-        long ts = nextTimestamp();
-        initTableValues(ts);
-        String ddl = "ALTER TABLE multi_cf ADD response_time BIGINT";
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ (ts + 10);
-        Connection conn = DriverManager.getConnection(url);
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(url, props);
+        createTable(conn);
+        initTableValues(conn);
+
+        String ddl = "ALTER TABLE " + fullTableName + " ADD response_time 
BIGINT";
         conn.createStatement().execute(ddl);
-        conn.close();
-        
-        analyzeTable(getUrl(), ts + 15, "MULTI_CF");
-       
-        String dml = "upsert into " +
-        "MULTI_CF(" +
-        "    ID, " +
-        "    RESPONSE_TIME)" +
-        "VALUES ('000000000000003', 333)";
-        url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 
20); 
-        conn = DriverManager.getConnection(url);
+
+        String dml = "upsert into " + fullTableName + "(" + "    ID, " + "    
RESPONSE_TIME)"
+                + "VALUES ('000000000000003', 333)";
         conn.createStatement().execute(dml);
         conn.commit();
-        conn.close();
-        
-        analyzeTable(getUrl(), ts + 25, "MULTI_CF");
-        
-        String query = "SELECT ID,RESPONSE_TIME from multi_cf where 
RESPONSE_TIME = 333";
-        url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 
30); // Run query at timestamp 5
-        conn = DriverManager.getConnection(url);
+
+        analyzeTable(conn, fullTableName);
+
+        String query = "SELECT ID,RESPONSE_TIME from " + fullTableName + " 
where RESPONSE_TIME = 333";
         try {
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
@@ -356,17 +317,16 @@ public class MultiCfQueryExecIT extends 
BaseOwnClusterClientManagedTimeIT {
             conn.close();
         }
     }
-    
+
     @Test
     public void testEssentialColumnFamilyForRowKeyFilter() throws Exception {
-        long ts = nextTimestamp();
-        String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from multi_cf 
where SUBSTR(ID, 15) = '2'";
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ (ts + 5); // Run query at timestamp 5
+        String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from " + 
fullTableName + " where SUBSTR(ID, 15) = '2'";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);
         try {
-            initTableValues(ts);
-            analyzeTable(getUrl(), ts + 3, "MULTI_CF");
+            createTable(conn);
+            initTableValues(conn);
+            analyzeTable(conn, fullTableName);
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
index 4e1e983..dfcf68c 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
@@ -17,13 +17,12 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.apache.phoenix.util.TestUtil.STABLE_NAME;
+import static org.apache.phoenix.util.TestUtil.STABLE_PK_NAME;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.apache.phoenix.util.TestUtil.analyzeTable;
 import static org.apache.phoenix.util.TestUtil.analyzeTableColumns;
 import static org.apache.phoenix.util.TestUtil.analyzeTableIndex;
 import static org.apache.phoenix.util.TestUtil.getAllSplits;
-import static org.apache.phoenix.util.TestUtil.getSplits;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
@@ -31,8 +30,9 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Collections;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Scan;
@@ -40,18 +40,16 @@ import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.types.PChar;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.BeforeClass;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Before;
 import org.junit.Test;
 
-import com.google.common.collect.Maps;
+import com.google.common.base.Joiner;
 
 
-public class ParallelIteratorsIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class ParallelIteratorsIT extends ParallelStatsEnabledIT {
 
-    private static final String STABLE_INDEX = "STABLE_INDEX";
     protected static final byte[] KMIN  = new byte[] {'!'};
     protected static final byte[] KMIN2  = new byte[] {'.'};
     protected static final byte[] K1  = new byte[] {'a'};
@@ -66,27 +64,39 @@ public class ParallelIteratorsIT extends 
BaseOwnClusterHBaseManagedTimeIT {
     protected static final byte[] KMAX2  = new byte[] {'z'};
     protected static final byte[] KR = new byte[] { 'r' };
     protected static final byte[] KP = new byte[] { 'p' };
+
+    private String tableName;
+    private String indexName;
+    
+    @Before
+    public void generateTableNames() {
+        tableName = "T_" + generateRandomString();
+        indexName = "I_" + generateRandomString();
+    }
     
-    @BeforeClass
-    public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        // Must update config before starting server
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Long.toString(20));
-        props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
-        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+    private List<KeyRange> getSplits(Connection conn, byte[] lowerRange, 
byte[] upperRange) throws SQLException {
+        return TestUtil.getSplits(conn, tableName, STABLE_PK_NAME, lowerRange, 
upperRange, null, "COUNT(*)");
     }
 
     @Test
     public void testGetSplits() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl(), 
TEST_PROPERTIES);
-        initTableValues(conn);
-        
-        PreparedStatement stmt = conn.prepareStatement("UPDATE STATISTICS 
STABLE");
+        byte[][] splits = new byte[][] {K3,K4,K9,K11};
+        createTable(conn, splits);
+        PreparedStatement stmt = conn.prepareStatement("upsert into " + 
tableName + " VALUES (?, ?)");
+        stmt.setString(1, new String(KMIN));
+        stmt.setInt(2, 1);
         stmt.execute();
+        stmt.setString(1, new String(KMAX));
+        stmt.setInt(2, 2);
+        stmt.execute();
+        conn.commit();
+        
+        conn.createStatement().execute("UPDATE STATISTICS " + tableName);
         
         List<KeyRange> keyRanges;
         
-        keyRanges = getAllSplits(conn);
+        keyRanges = getAllSplits(conn, tableName);
         assertEquals("Unexpected number of splits: " + keyRanges, 7, 
keyRanges.size());
         assertEquals(newKeyRange(KeyRange.UNBOUND, KMIN), keyRanges.get(0));
         assertEquals(newKeyRange(KMIN, K3), keyRanges.get(1));
@@ -116,10 +126,10 @@ public class ParallelIteratorsIT extends 
BaseOwnClusterHBaseManagedTimeIT {
     public void testServerNameOnScan() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl(), 
TEST_PROPERTIES);
         byte[][] splits = new byte[][] { K3, K9, KR };
-        ensureTableCreated(getUrl(), STABLE_NAME, STABLE_NAME, splits);
+        createTable(conn, splits);
         
         PhoenixStatement stmt = 
conn.createStatement().unwrap(PhoenixStatement.class);
-        ResultSet rs = stmt.executeQuery("SELECT * FROM " + STABLE_NAME + " 
LIMIT 1");
+        ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName + " 
LIMIT 1");
         rs.next();
         QueryPlan plan = stmt.getQueryPlan();
         List<List<Scan>> nestedScans = plan.getScans();
@@ -138,56 +148,57 @@ public class ParallelIteratorsIT extends 
BaseOwnClusterHBaseManagedTimeIT {
     public void testGuidePostsLifeCycle() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl(), 
TEST_PROPERTIES);
         byte[][] splits = new byte[][] { K3, K9, KR };
-        ensureTableCreated(getUrl(), STABLE_NAME, STABLE_NAME, splits);
+        createTable(conn, splits);
+        
         // create index
-        conn.createStatement().execute("CREATE INDEX " + STABLE_INDEX + " ON " 
+ STABLE_NAME + "( \"value\")");
+        conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + 
tableName + "( \"value\")");
         // before upserting
-        List<KeyRange> keyRanges = getAllSplits(conn);
+        List<KeyRange> keyRanges = getAllSplits(conn, tableName);
         assertEquals(4, keyRanges.size());
         upsert(conn, new byte[][] { KMIN, K4, K11 });
         // Analyze table alone
-        analyzeTableColumns(conn);
-        keyRanges = getAllSplits(conn);
+        analyzeTableColumns(conn, tableName);
+        keyRanges = getAllSplits(conn, tableName);
         assertEquals(7, keyRanges.size());
         // Get all splits on the index table before calling analyze on the 
index table
-        List<KeyRange> indexSplits = getAllSplits(conn, STABLE_INDEX);
+        List<KeyRange> indexSplits = getAllSplits(conn, indexName);
         assertEquals(1, indexSplits.size());
         // Analyze the index table alone
-        analyzeTableIndex(conn, STABLE_NAME);
+        analyzeTableIndex(conn, tableName);
         // check the splits of the main table 
-        keyRanges = getAllSplits(conn);
+        keyRanges = getAllSplits(conn, tableName);
         assertEquals(7, keyRanges.size());
         // check the splits on the index table
-        indexSplits = getAllSplits(conn, STABLE_INDEX);
+        indexSplits = getAllSplits(conn, indexName);
         assertEquals(4, indexSplits.size());
         upsert(conn, new byte[][] { KMIN2, K5, K12 });
         // Update the stats for both the table and the index table
-        analyzeTable(conn);
-        keyRanges = getAllSplits(conn);
+        analyzeTable(conn, tableName);
+        keyRanges = getAllSplits(conn, tableName);
         assertEquals(10, keyRanges.size());
        // the above analyze should have updated the index splits also
-        indexSplits = getAllSplits(conn, STABLE_INDEX);
+        indexSplits = getAllSplits(conn, indexName);
         assertEquals(7, indexSplits.size());
         upsert(conn, new byte[][] { K1, K6, KP });
         // Update only the table
-        analyzeTableColumns(conn);
-        keyRanges = getAllSplits(conn);
+        analyzeTableColumns(conn, tableName);
+        keyRanges = getAllSplits(conn, tableName);
         assertEquals(13, keyRanges.size());
         // No change to the index splits
-        indexSplits = getAllSplits(conn, STABLE_INDEX);
+        indexSplits = getAllSplits(conn, indexName);
         assertEquals(7, indexSplits.size());
-        analyzeTableIndex(conn, STABLE_NAME);
-        indexSplits = getAllSplits(conn, STABLE_INDEX);
+        analyzeTableIndex(conn, tableName);
+        indexSplits = getAllSplits(conn, indexName);
        // the above analyze should have updated the index splits only
         assertEquals(10, indexSplits.size());
         // No change in main table splits
-        keyRanges = getAllSplits(conn);
+        keyRanges = getAllSplits(conn, tableName);
         assertEquals(13, keyRanges.size());
         conn.close();
     }
 
-    private static void upsert(Connection conn, byte[][] val) throws Exception 
{
-        PreparedStatement stmt = conn.prepareStatement("upsert into " + 
STABLE_NAME + " VALUES (?, ?)");
+    private void upsert(Connection conn, byte[][] val) throws Exception {
+        PreparedStatement stmt = conn.prepareStatement("upsert into " + 
tableName + " VALUES (?, ?)");
         stmt.setString(1, new String(val[0]));
         stmt.setInt(2, 1);
         stmt.execute();
@@ -204,16 +215,13 @@ public class ParallelIteratorsIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         return PChar.INSTANCE.getKeyRange(lowerRange, true, upperRange, false);
     }
     
-    private static void initTableValues(Connection conn) throws Exception {
-        byte[][] splits = new byte[][] {K3,K4,K9,K11};
-        ensureTableCreated(getUrl(),STABLE_NAME, STABLE_NAME, splits);
-        PreparedStatement stmt = conn.prepareStatement("upsert into " + 
STABLE_NAME + " VALUES (?, ?)");
-        stmt.setString(1, new String(KMIN));
-        stmt.setInt(2, 1);
-        stmt.execute();
-        stmt.setString(1, new String(KMAX));
-        stmt.setInt(2, 2);
+    private void createTable (Connection conn, byte[][] splits) throws 
SQLException {
+        PreparedStatement stmt = conn.prepareStatement("create table " + 
tableName +
+                "   (id char(1) not null primary key,\n" +
+                "    \"value\" integer) SPLIT ON (" + 
Joiner.on(',').join(Collections.nCopies(splits.length, "?")) + ")");
+        for (int i = 0; i < splits.length; i++) {
+            stmt.setBytes(i+1, splits[i]);
+        }
         stmt.execute();
-        conn.commit();
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
index 8ecb7b4..322cb9e 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
@@ -18,8 +18,15 @@
 
 package org.apache.phoenix.end2end;
 
+import java.util.Map;
+
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
 import org.junit.experimental.categories.Category;
 
+import com.google.common.collect.Maps;
+
 /**
  * 
  * Base class for tests that have statistics enabled.
@@ -27,5 +34,12 @@ import org.junit.experimental.categories.Category;
  */
 @Category(ParallelStatsEnabledTest.class)
 public abstract class ParallelStatsEnabledIT extends BaseParallelIT {
-
+    
+    @BeforeClass
+    @Shadower(classBeingShadowed = BaseParallelIT.class)
+    public static void doSetup() throws Exception {
+        Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
+        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Long.toString(20));
+        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java
index 7450022..2d58615 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java
@@ -48,7 +48,7 @@ import org.junit.Test;
 import com.google.common.collect.Maps;
 
 
-public class QueryTimeoutIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class QueryTimeoutIT extends BaseOwnClusterIT {
     private static final String TEST_TABLE_NAME = "T";
     
     @BeforeClass

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java
index fae7a7c..cf8b0a9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java
@@ -43,7 +43,7 @@ import org.junit.Test;
 import com.google.common.collect.Maps;
 
 
-public class QueryWithLimitIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class QueryWithLimitIT extends BaseOwnClusterIT {
 
     @BeforeClass
     public static void doSetup() throws Exception {
@@ -61,7 +61,9 @@ public class QueryWithLimitIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
         try {
-            ensureTableCreated(getUrl(), KEYONLY_NAME, KEYONLY_NAME);
+            conn.createStatement().execute("create table " + KEYONLY_NAME + 
"\n" +
+                "   (i1 integer not null, i2 integer not null\n" +
+                "    CONSTRAINT pk PRIMARY KEY (i1,i2))");
             initTableValues(conn, 100);
             
             String query = "SELECT i1 FROM KEYONLY LIMIT 1";
@@ -85,7 +87,9 @@ public class QueryWithLimitIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
 
-        ensureTableCreated(getUrl(), KEYONLY_NAME, KEYONLY_NAME);
+        conn.createStatement().execute("create table " + KEYONLY_NAME + "\n" +
+                "   (i1 integer not null, i2 integer not null\n" +
+                "    CONSTRAINT pk PRIMARY KEY (i1,i2))");
         initTableValues(conn, 100);
         conn.createStatement().execute("UPDATE STATISTICS " + KEYONLY_NAME);
         

Reply via email to