This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
     new 740ce13  PHOENIX-6072: SYSTEM.MUTEX not created with a TTL on a fresh cluster connected to by a 4.15+ client
740ce13 is described below

commit 740ce13210cf6d876abfde52a58e10fa407b5018
Author: Chinmay Kulkarni <chinmayskulka...@gmail.com>
AuthorDate: Mon Sep 14 21:43:33 2020 -0700

    PHOENIX-6072: SYSTEM.MUTEX not created with a TTL on a fresh cluster connected to by a 4.15+ client
---
 ...ava => SystemTablesCreationOnConnectionIT.java} | 515 ++++++++++++---------
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java      |   1 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |  76 ++-
 .../query/ConnectionlessQueryServicesImpl.java     |   2 +-
 .../org/apache/phoenix/query/QueryConstants.java   | 382 +++++++++------
 .../query/ConnectionQueryServicesImplTest.java     | 101 +++-
 6 files changed, 698 insertions(+), 379 deletions(-)

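For readers skimming the patch: the core of the change is that every path which creates or finds SYSTEM.MUTEX now applies the TTL_FOR_MUTEX value (15 minutes, moved into PhoenixDatabaseMetaData) to the table's column family, patching an existing descriptor whose TTL is wrong. A minimal standalone sketch of that idea against the HBase 2.x Admin API used in the patch — the class name, the family name "0", and the helper method are illustrative assumptions, not code from the commit:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative helper only; not part of the commit
    public final class MutexTtlSketch {
        private static final TableName SYS_MUTEX = TableName.valueOf("SYSTEM.MUTEX");
        private static final byte[] MUTEX_FAMILY = Bytes.toBytes("0"); // assumed family name
        private static final int TTL_FOR_MUTEX = 15 * 60;              // 15 min, as in the patch

        /** Ensure SYSTEM.MUTEX's column family carries the expected TTL, fixing it if it does not. */
        static void ensureMutexTtl(Admin admin) throws IOException {
            TableDescriptor htd = admin.getDescriptor(SYS_MUTEX);
            if (htd.getColumnFamily(MUTEX_FAMILY).getTimeToLive() != TTL_FOR_MUTEX) {
                // Rebuild the descriptor with the corrected TTL and apply it in place
                admin.modifyTable(TableDescriptorBuilder.newBuilder(htd)
                        .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                                .newBuilder(htd.getColumnFamily(MUTEX_FAMILY))
                                .setTimeToLive(TTL_FOR_MUTEX)
                                .build())
                        .build());
            }
        }
    }
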
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
similarity index 55%
rename from phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
rename to phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
index de047a3..627d7b2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
@@ -18,6 +18,9 @@
 package org.apache.phoenix.end2end;
 
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_FOR_MUTEX;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -29,6 +32,7 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -39,10 +43,12 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.UpgradeRequiredException;
@@ -63,13 +69,13 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category(NeedsOwnMiniClusterTest.class)
-public class SystemCatalogCreationOnConnectionIT {
+public class SystemTablesCreationOnConnectionIT {
     private HBaseTestingUtility testUtil = null;
     private Set<String> hbaseTables;
     private static boolean setOldTimestampToInduceUpgrade = false;
     private static int countUpgradeAttempts;
-    // This flag is used to figure out if the SYSCAT schema was actually upgraded or not, based on the timestamp of SYSCAT
-    // (different from an upgrade attempt)
+    // This flag is used to figure out if the SYSCAT schema was actually upgraded or not, based on
+    // the timestamp of SYSCAT (different from an upgrade attempt)
     private static int actualSysCatUpgrades;
     private static final String PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG = "SYSTEM:CATALOG";
     private static final String PHOENIX_SYSTEM_CATALOG = "SYSTEM.CATALOG";
@@ -79,8 +85,9 @@ public class SystemCatalogCreationOnConnectionIT {
             + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY 
KEY(K1,K2))";
     private static final String SELECT_STMT = "SELECT * FROM %s";
     private static final String DELETE_STMT = "DELETE FROM %s";
-    private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMY_IDX ON 
%s (K1) INCLUDE (K2)";
+    private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMYIDX ON 
%s (K1) INCLUDE (K2)";
     private static final String UPSERT_STMT = "UPSERT INTO %s VALUES ('A', 
'B')";
+    private static final String QUERY_SYSTEM_CATALOG = "SELECT * FROM 
SYSTEM.CATALOG LIMIT 1";
 
     private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList(
       "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
@@ -92,7 +99,8 @@ public class SystemCatalogCreationOnConnectionIT {
 
     private static class PhoenixSysCatCreationServices extends ConnectionQueryServicesImpl {
 
-        PhoenixSysCatCreationServices(QueryServices services, PhoenixEmbeddedDriver.ConnectionInfo connectionInfo, Properties info) {
+        PhoenixSysCatCreationServices(QueryServices services,
+                PhoenixEmbeddedDriver.ConnectionInfo connectionInfo, Properties info) {
             super(services, connectionInfo, info);
         }
 
@@ -105,7 +113,8 @@ public class SystemCatalogCreationOnConnectionIT {
         @Override
         protected long getSystemTableVersion() {
             if (setOldTimestampToInduceUpgrade) {
-                // Return the next lower version where an upgrade was performed to induce setting the upgradeRequired flag
+                // Return the next lower version where an upgrade was performed to induce setting
+                // the upgradeRequired flag
                 return MetaDataProtocol.getPriorUpgradeVersion();
             }
             return MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
@@ -113,8 +122,10 @@ public class SystemCatalogCreationOnConnectionIT {
 
         @Override
         protected PhoenixConnection upgradeSystemCatalogIfRequired(PhoenixConnection metaConnection,
-          long currentServerSideTableTimeStamp) throws InterruptedException, SQLException, TimeoutException, IOException {
-            PhoenixConnection newMetaConnection = super.upgradeSystemCatalogIfRequired(metaConnection, currentServerSideTableTimeStamp);
+          long currentServerSideTableTimeStamp) throws InterruptedException, SQLException,
+                TimeoutException, IOException {
+            PhoenixConnection newMetaConnection = super.upgradeSystemCatalogIfRequired(
+                    metaConnection, currentServerSideTableTimeStamp);
             if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP) {
                 actualSysCatUpgrades++;
             }
@@ -131,18 +142,22 @@ public class SystemCatalogCreationOnConnectionIT {
         }
 
         @Override // public for testing
-        public synchronized ConnectionQueryServices getConnectionQueryServices(String url, Properties info) throws SQLException {
+        public synchronized ConnectionQueryServices getConnectionQueryServices(String url,
+                Properties info) throws SQLException {
             if (cqs == null) {
-                cqs = new PhoenixSysCatCreationServices(new QueryServicesTestImpl(getDefaultProps(), overrideProps), ConnectionInfo.create(url), info);
+                cqs = new PhoenixSysCatCreationServices(new QueryServicesTestImpl(getDefaultProps(),
+                        overrideProps), ConnectionInfo.create(url), info);
                 cqs.init(url, info);
             }
             return cqs;
         }
 
-        // NOTE: Do not use this if you want to try re-establishing a connection from the client using a previously
-        // used ConnectionQueryServices instance. This is used only in cases where we need to test server-side
-        // changes and don't care about client-side properties set from the init method.
-        // Reset the Connection Query Services instance so we can create a new connection to the cluster
+        // NOTE: Do not use this if you want to try re-establishing a connection from the client
+        // using a previously used ConnectionQueryServices instance. This is used only in cases
+        // where we need to test server-side changes and don't care about client-side properties
+        // set from the init method.
+        // Reset the Connection Query Services instance so we can create a new connection to the
+        // cluster
         void resetCQS() {
             cqs = null;
         }
@@ -177,12 +192,13 @@ public class SystemCatalogCreationOnConnectionIT {
         Properties propsDoNotUpgradePropSet = new Properties();
         // Set doNotUpgradeProperty to true
         UpgradeUtil.doNotUpgradeOnFirstConnection(propsDoNotUpgradePropSet);
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
+        PhoenixSysCatCreationTestingDriver driver =
+                new PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);

         driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
         hbaseTables = getHBaseTables();
-        assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) || hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
+        assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) ||
+                hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
         assertEquals(0, hbaseTables.size());
         assertEquals(1, countUpgradeAttempts);
     }
@@ -191,36 +207,37 @@ public class SystemCatalogCreationOnConnectionIT {
     /********************* Testing SYSTEM.CATALOG/SYSTEM:CATALOG creation/upgrade behavior for subsequent connections *********************/
 
 
-    // Conditions: server-side namespace mapping is enabled, the first connection to the server will create all namespace
-    // mapped SYSTEM tables i.e. SYSTEM:.*, the SYSTEM:CATALOG timestamp at creation is purposefully set to be <
-    // MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP. The subsequent connection has client-side namespace mapping enabled
+    // Conditions: server-side namespace mapping is enabled, the first connection to the server will
+    // create all namespace mapped SYSTEM tables i.e. SYSTEM:.*, the SYSTEM:CATALOG timestamp at
+    // creation is purposefully set to be < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP.
+    // The subsequent connection has client-side namespace mapping enabled
     // Expected: An upgrade is attempted when the second client connects to the server
     @Test
     public void testUpgradeAttempted() throws Exception {
         setOldTimestampToInduceUpgrade = true;
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          firstConnectionNSMappingServerEnabledClientEnabled();
+        PhoenixSysCatCreationTestingDriver driver = firstConnNSMappingServerEnabledClientEnabled();
         driver.resetCQS();
         Properties clientProps = getClientProperties(true, true);
         setOldTimestampToInduceUpgrade = false;
         driver.getConnectionQueryServices(getJdbcUrl(), clientProps);
         // There should be no new tables
         assertEquals(hbaseTables, getHBaseTables());
-        // Since we set an old timestamp on purpose when creating SYSTEM:CATALOG, the second connection attempts to upgrade it
+        // Since we set an old timestamp on purpose when creating SYSTEM:CATALOG,
+        // the second connection attempts to upgrade it
         assertEquals(1, countUpgradeAttempts);
         assertEquals(1, actualSysCatUpgrades);
     }
 
-    // Conditions: server-side namespace mapping is enabled, the first connection to the server will create all namespace
-    // mapped SYSTEM tables i.e. SYSTEM:.*, the SYSTEM:CATALOG timestamp at creation is purposefully set to be <
-    // MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP. The subsequent connection has client-side namespace mapping enabled
-    // Expected: An upgrade is attempted when the second client connects to the server, but this fails since the
-    // isDoNotUpgradePropSet is set to true. We later run EXECUTE UPGRADE manually
+    // Conditions: server-side namespace mapping is enabled, the first connection to the server will
+    // create all namespace mapped SYSTEM tables i.e. SYSTEM:.*, the SYSTEM:CATALOG timestamp at
+    // creation is purposefully set to be < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP.
+    // The subsequent connection has client-side namespace mapping enabled
+    // Expected: An upgrade is attempted when the second client connects to the server, but this
+    // fails since isDoNotUpgradePropSet is set to true. We later run EXECUTE UPGRADE manually
     @Test
     public void testUpgradeNotAllowed() throws Exception {
         setOldTimestampToInduceUpgrade = true;
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          firstConnectionNSMappingServerEnabledClientEnabled();
+        PhoenixSysCatCreationTestingDriver driver = firstConnNSMappingServerEnabledClientEnabled();
         driver.resetCQS();
         Properties clientProps = getClientProperties(true, true);
         UpgradeUtil.doNotUpgradeOnFirstConnection(clientProps);
@@ -232,29 +249,31 @@ public class SystemCatalogCreationOnConnectionIT {
         }
         // There should be no new tables
         assertEquals(hbaseTables, getHBaseTables());
-        // Since we set an old timestamp on purpose when creating SYSTEM:CATALOG, the second connection attempts to upgrade it
+        // Since we set an old timestamp on purpose when creating SYSTEM:CATALOG, the second
+        // connection attempts to upgrade it
         assertEquals(1, countUpgradeAttempts);
         // This connection is unable to actually upgrade SYSTEM:CATALOG due to isDoNotUpgradePropSet
         assertEquals(0, actualSysCatUpgrades);
-        Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), new Properties()).connect(getJdbcUrl(), new Properties());
-        try {
-            conn.createStatement().execute(EXECUTE_UPGRADE_COMMAND);
+        try (Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), new Properties())
+                .connect(getJdbcUrl(), new Properties()); Statement stmt = conn.createStatement()) {
+            stmt.execute(EXECUTE_UPGRADE_COMMAND);
             // Actually upgraded SYSTEM:CATALOG
             assertEquals(1, actualSysCatUpgrades);
-        } finally {
-            conn.close();
         }
     }
 
-    // Conditions: server-side namespace mapping is enabled, the first connection to the server will create unmapped SYSTEM
-    // tables SYSTEM\..* whose timestamp at creation is purposefully set to be < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP.
-    // The second connection has client-side namespace mapping enabled and system table to system namespace mapping enabled
-    // Expected: We will migrate all SYSTEM\..* tables to the SYSTEM namespace and also upgrade SYSTEM:CATALOG
+    // Conditions: server-side namespace mapping is enabled, the first connection to the server will
+    // create unmapped SYSTEM tables SYSTEM\..* whose timestamp at creation is purposefully set to
+    // be < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP.
+    // The second connection has client-side namespace mapping enabled and system table to system
+    // namespace mapping enabled
+    // Expected: We will migrate all SYSTEM\..* tables to the SYSTEM namespace and also upgrade
+    // SYSTEM:CATALOG
     @Test
     public void testMigrateToSystemNamespaceAndUpgradeSysCat() throws Exception {
         setOldTimestampToInduceUpgrade = true;
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          firstConnectionNSMappingServerEnabledClientEnabledMappingDisabled();
+        PhoenixSysCatCreationTestingDriver driver =
+                firstConnNSMappingServerEnabledClientEnabledMappingDisabled();
         driver.resetCQS();
         setOldTimestampToInduceUpgrade = false;
         Properties clientProps = getClientProperties(true, true);
@@ -265,64 +284,73 @@ public class SystemCatalogCreationOnConnectionIT {
         assertEquals(1, actualSysCatUpgrades);
     }
 
-    // Conditions: server-side namespace mapping is enabled, the first connection to the server will create all namespace
-    // mapped SYSTEM tables i.e. SYSTEM:.*, the second connection has client-side namespace mapping disabled
+    // Conditions: server-side namespace mapping is enabled, the first connection to the server will
+    // create all namespace mapped SYSTEM tables i.e. SYSTEM:.*, the second connection has
+    // client-side namespace mapping disabled
     // Expected: Throw Inconsistent namespace mapping exception from ensureTableCreated
     @Test
     public void testTablesExistInconsistentNSMappingFails() throws Exception {
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          firstConnectionNSMappingServerEnabledClientEnabled();
+        PhoenixSysCatCreationTestingDriver driver = firstConnNSMappingServerEnabledClientDisabled();
         driver.resetCQS();
         Properties clientProps = getClientProperties(false, false);
         try {
             driver.getConnectionQueryServices(getJdbcUrl(), clientProps);
-            fail("Client should not be able to connect to cluster with 
inconsistent client-server namespace mapping properties");
+            fail("Client should not be able to connect to cluster with 
inconsistent client-server "
+                    + "namespace mapping properties");
         } catch (SQLException sqlE) {
-            
assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(),
 sqlE.getErrorCode());
+            
assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(),
+                    sqlE.getErrorCode());
         }
         hbaseTables = getHBaseTables();
         assertEquals(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES, hbaseTables);
         assertEquals(0, countUpgradeAttempts);
     }
 
-    // Conditions: server-side namespace mapping is enabled, the first connection to the server will not create any
-    // SYSTEM tables. The second connection has client-side namespace mapping enabled
+    // Conditions: server-side namespace mapping is enabled, the first connection to the server will
+    // not create any SYSTEM tables. The second connection has client-side namespace mapping enabled
     // Expected: We create SYSTEM:.* tables
     @Test
     public void testIncompatibleNSMappingServerEnabledConnectionFails() throws Exception {
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          firstConnectionNSMappingServerEnabledClientDisabled();
+        PhoenixSysCatCreationTestingDriver driver = firstConnNSMappingServerEnabledClientDisabled();
         driver.resetCQS();
         // now try a client with ns mapping enabled
         Properties clientProps = getClientProperties(true, true);
-        Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), clientProps)
-                .connect(getJdbcUrl(), new Properties());
-        hbaseTables = getHBaseTables();
-        assertEquals(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES, hbaseTables);
-        assertEquals(0, countUpgradeAttempts);
-
-        ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM SYSTEM.CATALOG LIMIT 1");
-        // Tests that SYSTEM:CATALOG contains necessary metadata rows for itself (See PHOENIX-5302)
-        assertTrue(rs.next());
+        try (Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), clientProps)
+                .connect(getJdbcUrl(), new Properties())) {
+            hbaseTables = getHBaseTables();
+            assertEquals(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES, hbaseTables);
+            assertEquals(0, countUpgradeAttempts);
+
+            try (Statement stmt = conn.createStatement()) {
+                ResultSet rs = stmt.executeQuery(QUERY_SYSTEM_CATALOG);
+                // Tests that SYSTEM:CATALOG contains necessary metadata rows for itself
+                // (See PHOENIX-5302)
+                assertTrue(rs.next());
+            }
+        }
     }
 
-    // Conditions: server-side namespace mapping is disabled, the first connection to the server will create all unmapped
-    // SYSTEM tables i.e. SYSTEM\..*, the second connection has client-side namespace mapping enabled
-    // Expected: Throw Inconsistent namespace mapping exception when you check client-server compatibility
+    // Conditions: server-side namespace mapping is disabled, the first connection to the server
+    // will create all unmapped SYSTEM tables i.e. SYSTEM\..*, the second connection has client-side
+    // namespace mapping enabled
+    // Expected: Throw Inconsistent namespace mapping exception when you check client-server
+    // compatibility
     //
     // Then another connection has client-side namespace mapping disabled
     // Expected: All SYSTEM\..* tables exist and no upgrade is required
     @Test
     public void testSysTablesExistNSMappingDisabled() throws Exception {
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          firstConnectionNSMappingServerDisabledClientDisabled();
+        PhoenixSysCatCreationTestingDriver driver =
+                firstConnNSMappingServerDisabledClientDisabled();
         driver.resetCQS();
         Properties clientProps = getClientProperties(true, true);
         try {
             driver.getConnectionQueryServices(getJdbcUrl(), clientProps);
-            fail("Client should not be able to connect to cluster with 
inconsistent client-server namespace mapping properties");
+            fail("Client should not be able to connect to cluster with 
inconsistent client-server "
+                    + "namespace mapping properties");
         } catch (SQLException sqlE) {
-            
assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(),
 sqlE.getErrorCode());
+            
assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(),
+                    sqlE.getErrorCode());
         }
         hbaseTables = getHBaseTables();
         assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables);
@@ -336,45 +364,49 @@ public class SystemCatalogCreationOnConnectionIT {
         assertEquals(0, countUpgradeAttempts);
     }
 
-    // Conditions: server-side namespace mapping is disabled, the first connection to the server will not create any
-    // SYSTEM tables. The second connection has client-side namespace mapping disabled
+    // Conditions: server-side namespace mapping is disabled, the first connection to the server
+    // will not create any SYSTEM tables. The second connection has client-side namespace mapping
+    // disabled
     // Expected: The second connection should create all SYSTEM.* tables
     @Test
     public void testIncompatibleNSMappingServerDisabledConnectionFails() throws Exception {
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          firstConnectionNSMappingServerDisabledClientEnabled();
+        PhoenixSysCatCreationTestingDriver driver = firstConnNSMappingServerDisabledClientEnabled();
         driver.resetCQS();
         // now try a client with ns mapping disabled
         Properties clientProps = getClientProperties(false, false);
-        Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), clientProps)
-                .connect(getJdbcUrl(), new Properties());
-        hbaseTables = getHBaseTables();
-        assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables);
-        assertEquals(0, countUpgradeAttempts);
-
-        ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM SYSTEM.CATALOG LIMIT 1");
-        // Tests that SYSTEM.CATALOG contains necessary metadata rows for itself (See PHOENIX-5302)
-        assertTrue(rs.next());
+        try (Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), clientProps)
+                .connect(getJdbcUrl(), new Properties())) {
+            hbaseTables = getHBaseTables();
+            assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables);
+            assertEquals(0, countUpgradeAttempts);
+
+            try (Statement stmt = conn.createStatement()) {
+                ResultSet rs = stmt.executeQuery(QUERY_SYSTEM_CATALOG);
+                // Tests that SYSTEM.CATALOG contains necessary metadata rows for itself
+                // (See PHOENIX-5302)
+                assertTrue(rs.next());
+            }
+        }
     }
 
-    // Conditions: The first connection creates all SYSTEM tables via "EXECUTE UPGRADE" since auto-upgrade is disabled
-    // and the same client alters HBase metadata for SYSTEM.CATALOG
-    // Expected: Another client connection (with a new ConnectionQueryServices instance) made to the server does not
-    // revert the metadata change
+    // Conditions: The first connection creates all SYSTEM tables via "EXECUTE UPGRADE" since
+    // auto-upgrade is disabled and the same client alters HBase metadata for SYSTEM.CATALOG
+    // Expected: Another client connection (with a new ConnectionQueryServices instance) made to
+    // the server does not revert the metadata change
     @Test
     public void testMetadataAlterRemainsAutoUpgradeDisabled() throws Exception {
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = firstConnectionAutoUpgradeToggle(false);
-        assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), verifyModificationTableMetadata(driver, PHOENIX_SYSTEM_CATALOG));
+        PhoenixSysCatCreationTestingDriver driver = firstConnAutoUpgradeToggle(false);
+        assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), verifyModifiedTableMetadata(driver));
     }
 
-    // Conditions: The first connection creates all SYSTEM tables (auto-upgrade is enabled) and the same client alters
-    // HBase metadata for SYSTEM.CATALOG
-    // Expected: Another client connection (with a new ConnectionQueryServices instance) made to the server does not
-    // revert the metadata change
+    // Conditions: The first connection creates all SYSTEM tables (auto-upgrade is enabled) and
+    // the same client alters HBase metadata for SYSTEM.CATALOG
+    // Expected: Another client connection (with a new ConnectionQueryServices instance) made to
+    // the server does not revert the metadata change
     @Test
     public void testMetadataAlterRemainsAutoUpgradeEnabled() throws Exception {
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = firstConnectionAutoUpgradeToggle(true);
-        assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), verifyModificationTableMetadata(driver, PHOENIX_SYSTEM_CATALOG));
+        PhoenixSysCatCreationTestingDriver driver = firstConnAutoUpgradeToggle(true);
+        assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), verifyModifiedTableMetadata(driver));
     }
 
     // Test the case when an end-user uses the vanilla PhoenixDriver to create a connection and a
@@ -385,66 +417,96 @@ public class SystemCatalogCreationOnConnectionIT {
         // Register the vanilla PhoenixDriver
         DriverManager.registerDriver(PhoenixDriver.INSTANCE);
         startMiniClusterWithToggleNamespaceMapping(Boolean.FALSE.toString());
-        Properties propsDoNotUpgradePropSet = new Properties();
+        Properties propsDoNotUpgradeSet = new Properties();
         // Set doNotUpgradeProperty to true
-        UpgradeUtil.doNotUpgradeOnFirstConnection(propsDoNotUpgradePropSet);
+        UpgradeUtil.doNotUpgradeOnFirstConnection(propsDoNotUpgradeSet);
+
+        try (Connection conn = DriverManager.getConnection(getJdbcUrl(), propsDoNotUpgradeSet)) {
+            hbaseTables = getHBaseTables();
+            assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG)
+                    || hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
+            assertEquals(0, hbaseTables.size());
+
+            // Test that we are unable to run any other queries using this connection
+            // until we upgrade
+            final String tableName = generateUniqueName();
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(String.format(CREATE_TABLE_STMT, tableName));
+                fail("CREATE TABLE should have failed with UpgradeRequiredException");
+            } catch (UpgradeRequiredException expected) {
 
-        Connection conn = DriverManager.getConnection(getJdbcUrl(), propsDoNotUpgradePropSet);
-        hbaseTables = getHBaseTables();
-        assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG)
-                || hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
-        assertEquals(0, hbaseTables.size());
+            }
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(String.format(SELECT_STMT, tableName));
+                fail("SELECT should have failed with UpgradeRequiredException");
+            } catch (UpgradeRequiredException expected) {
 
-        // Test that we are unable to run any other queries using this connection until we upgrade
-        final String tableName = generateUniqueName();
-        try {
-            conn.createStatement().execute(String.format(CREATE_TABLE_STMT, tableName));
-            fail("CREATE TABLE should have failed with UpgradeRequiredException");
-        } catch (UpgradeRequiredException expected) {
+            }
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(String.format(DELETE_STMT, tableName));
+                fail("DELETE should have failed with UpgradeRequiredException");
+            } catch (UpgradeRequiredException expected) {
 
-        }
-        try {
-            conn.createStatement().execute(String.format(SELECT_STMT, tableName));
-            fail("SELECT should have failed with UpgradeRequiredException");
-        } catch (UpgradeRequiredException expected) {
+            }
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(String.format(CREATE_INDEX_STMT, tableName));
+                fail("CREATE INDEX should have failed with UpgradeRequiredException");
+            } catch (UpgradeRequiredException expected) {
 
-        }
-        try {
-            conn.createStatement().execute(String.format(DELETE_STMT, tableName));
-            fail("DELETE should have failed with UpgradeRequiredException");
-        } catch (UpgradeRequiredException expected) {
+            }
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(String.format(UPSERT_STMT, tableName));
+                fail("UPSERT VALUES should have failed with UpgradeRequiredException");
+            } catch (UpgradeRequiredException expected) {
 
-        }
-        try {
-            conn.createStatement().execute(String.format(CREATE_INDEX_STMT, tableName));
-            fail("CREATE INDEX should have failed with UpgradeRequiredException");
-        } catch (UpgradeRequiredException expected) {
+            }
 
-        }
-        try {
-            conn.createStatement().execute(String.format(UPSERT_STMT, tableName));
-            fail("UPSERT VALUES should have failed with UpgradeRequiredException");
-        } catch (UpgradeRequiredException expected) {
+            // Now run the upgrade command. All SYSTEM tables should be created
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(EXECUTE_UPGRADE_COMMAND);
+            }
+            hbaseTables = getHBaseTables();
+            assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables);
 
+            // Now we can run any other query/mutation using this connection object
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(String.format(CREATE_TABLE_STMT, tableName));
+            }
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(String.format(SELECT_STMT, tableName));
+            }
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(String.format(DELETE_STMT, tableName));
+            }
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(String.format(CREATE_INDEX_STMT, tableName));
+            }
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(String.format(UPSERT_STMT, tableName));
+            }
         }
+    }
 
-        // Now run the upgrade command. All SYSTEM tables should be created
-        conn.createStatement().execute("EXECUTE UPGRADE");
-        hbaseTables = getHBaseTables();
-        assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables);
-
-        // Now we can run any other query/mutation using this connection object
-        conn.createStatement().execute(String.format(CREATE_TABLE_STMT, tableName));
-        conn.createStatement().execute(String.format(SELECT_STMT, tableName));
-        conn.createStatement().execute(String.format(DELETE_STMT, tableName));
-        conn.createStatement().execute(String.format(CREATE_INDEX_STMT, tableName));
-        conn.createStatement().execute(String.format(UPSERT_STMT, tableName));
+    // Test that whenever a client connects to a fresh cluster, SYSTEM.MUTEX is created with a
+    // TTL as defined by TTL_FOR_MUTEX
+    @Test
+    public void testSysMutexHasCorrectTTL() throws Exception {
+        // Register the vanilla PhoenixDriver
+        DriverManager.registerDriver(PhoenixDriver.INSTANCE);
+        startMiniClusterWithToggleNamespaceMapping(Boolean.FALSE.toString());
+        try (Connection ignored = DriverManager.getConnection(getJdbcUrl());
+                HBaseAdmin admin = testUtil.getHBaseAdmin()) {
+            HTableDescriptor htd = admin.getTableDescriptor(SYSTEM_MUTEX_HBASE_TABLE_NAME);
+            HColumnDescriptor hColDesc = htd.getFamily(SYSTEM_MUTEX_FAMILY_NAME_BYTES);
+            assertEquals("Did not find the correct TTL for SYSTEM.MUTEX", TTL_FOR_MUTEX,
+                    hColDesc.getTimeToLive());
+        }
     }
 
     /**
      * Return all created HBase tables
      * @return Set of HBase table name strings
-     * @throws IOException
+     * @throws IOException if there is a problem listing all HBase tables
      */
     private Set<String> getHBaseTables() throws IOException {
         Set<String> tables = new HashSet<>();
@@ -466,29 +528,37 @@ public class SystemCatalogCreationOnConnectionIT {
 
     /**
      * Alter the table metadata and return modified value
-     * @param driver
-     * @param tableName
+     * @param driver testing Phoenix driver
      * @return value of VERSIONS option for the table
-     * @throws Exception
+     * @throws Exception if there is an error modifying the HBase metadata for SYSTEM.CATALOG
      */
-    private int verifyModificationTableMetadata(PhoenixSysCatCreationTestingDriver driver, String tableName) throws Exception {
+    private int verifyModifiedTableMetadata(PhoenixSysCatCreationTestingDriver driver)
+            throws Exception {
         // Modify table metadata
-        Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), new Properties()).connect(getJdbcUrl(), new Properties());
-        conn.createStatement().execute("ALTER TABLE " + tableName + " SET VERSIONS = " + MODIFIED_MAX_VERSIONS);
+        try (Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), new Properties())
+                .connect(getJdbcUrl(), new Properties()); Statement stmt = conn.createStatement()) {
+            stmt.execute("ALTER TABLE " + PHOENIX_SYSTEM_CATALOG + " SET VERSIONS = "
+                    + MODIFIED_MAX_VERSIONS);
+        }
 
         // Connect via a client that creates a new ConnectionQueryServices instance
         driver.resetCQS();
-        driver.getConnectionQueryServices(getJdbcUrl(), new Properties()).connect(getJdbcUrl(), new Properties());
-        HTableDescriptor descriptor = testUtil.getHBaseAdmin().getTableDescriptor(TableName.valueOf(tableName));
+        try (PhoenixConnection ignored = driver.getConnectionQueryServices(
+                getJdbcUrl(), new Properties()).connect(getJdbcUrl(), new Properties())) {
+            // do nothing
+        }
+        HTableDescriptor descriptor = testUtil.getHBaseAdmin()
+                .getTableDescriptor(TableName.valueOf(PHOENIX_SYSTEM_CATALOG));
         return descriptor.getFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES).getMaxVersions();
     }
 
     /**
      * Start the mini-cluster with server-side namespace mapping property specified
-     * @param isNamespaceMappingEnabled
-     * @throws Exception
+     * @param isNamespaceMappingEnabled true if namespace mapping is enabled
+     * @throws Exception if there is an error starting the minicluster
      */
-    private void startMiniClusterWithToggleNamespaceMapping(String isNamespaceMappingEnabled) throws Exception {
+    private void startMiniClusterWithToggleNamespaceMapping(String isNamespaceMappingEnabled)
+            throws Exception {
         testUtil = new HBaseTestingUtility();
         Configuration conf = testUtil.getConfiguration();
         conf.set(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, isNamespaceMappingEnabled);
@@ -507,72 +577,77 @@ public class SystemCatalogCreationOnConnectionIT {
 
     /**
      * Set namespace mapping related properties for the client connection
-     * @param nsMappingEnabled
-     * @param systemTableMappingEnabled
+     * @param nsMappingEnabled true if namespace mapping is enabled
+     * @param systemTableMappingEnabled true if we are mapping SYSTEM tables to their own namespace
      * @return Properties object
      */
-    private Properties getClientProperties(boolean nsMappingEnabled, boolean systemTableMappingEnabled) {
+    private Properties getClientProperties(boolean nsMappingEnabled,
+            boolean systemTableMappingEnabled) {
         Properties clientProps = new Properties();
-        clientProps.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.valueOf(nsMappingEnabled).toString());
-        clientProps.setProperty(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, Boolean.valueOf(systemTableMappingEnabled).toString());
+        clientProps.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED,
+                Boolean.valueOf(nsMappingEnabled).toString());
+        clientProps.setProperty(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE,
+                Boolean.valueOf(systemTableMappingEnabled).toString());
         return clientProps;
     }
 
     /**
      * Initiate the first connection to the server with provided auto-upgrade property
-     * @param isAutoUpgradeEnabled
+     * @param isAutoUpgradeEnabled true if auto-upgrade is enabled
      * @return Phoenix JDBC driver
-     * @throws Exception
+     * @throws Exception if starting the minicluster fails
      */
-    private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionAutoUpgradeToggle(boolean isAutoUpgradeEnabled)
-    throws Exception {
+    private PhoenixSysCatCreationTestingDriver firstConnAutoUpgradeToggle(
+            boolean isAutoUpgradeEnabled) throws Exception {
         if (isAutoUpgradeEnabled) {
-            return firstConnectionNSMappingServerDisabledClientDisabled();
+            return firstConnNSMappingServerDisabledClientDisabled();
         }
-        return firstConnectionAutoUpgradeDisabled();
+        return firstConnAutoUpgradeDisabled();
     }
 
     // Conditions: isAutoUpgradeEnabled is false
-    // Expected: We do not create SYSTEM.CATALOG even if this is the first connection to the server. Later, when we manually
-    // run "EXECUTE UPGRADE", we create SYSTEM tables
-    private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionAutoUpgradeDisabled() throws Exception {
+    // Expected: We do not create SYSTEM.CATALOG even if this is the first connection to the server.
+    // Later, when we manually run "EXECUTE UPGRADE", we create SYSTEM tables
+    private PhoenixSysCatCreationTestingDriver firstConnAutoUpgradeDisabled() throws Exception {
         startMiniClusterWithToggleNamespaceMapping(Boolean.FALSE.toString());
         Map<String, String> props = new HashMap<>();
-        // Note that the isAutoUpgradeEnabled property is set when instantiating connection query services, not during init
+        // Note that the isAutoUpgradeEnabled property is set when instantiating connection
+        // query services, not during init
         props.put(QueryServices.AUTO_UPGRADE_ENABLED, Boolean.FALSE.toString());
         ReadOnlyProps readOnlyProps = new ReadOnlyProps(props);
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(readOnlyProps);
+        PhoenixSysCatCreationTestingDriver driver =
+                new PhoenixSysCatCreationTestingDriver(readOnlyProps);
 
         // We should be able to get a connection, however upgradeRequired should be set so that we
         // are not allowed to run any query/mutation until "EXECUTE UPGRADE" has been run
-        Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), new Properties())
-                .connect(getJdbcUrl(), new Properties());
-        hbaseTables = getHBaseTables();
-        assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) || hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
-        assertEquals(0, hbaseTables.size());
-        assertEquals(1, countUpgradeAttempts);
-
-        // We use the same connection to run "EXECUTE UPGRADE"
-        try {
-            conn.createStatement().execute(EXECUTE_UPGRADE_COMMAND);
-        } finally {
-            conn.close();
+        try (Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), new Properties())
+                .connect(getJdbcUrl(), new Properties())) {
+            hbaseTables = getHBaseTables();
+            assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) ||
+                    hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
+            assertEquals(0, hbaseTables.size());
+            assertEquals(1, countUpgradeAttempts);
+
+            // We use the same connection to run "EXECUTE UPGRADE"
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(EXECUTE_UPGRADE_COMMAND);
+            }
         }
         hbaseTables = getHBaseTables();
         assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables);
         return driver;
     }
 
-    // Conditions: server-side namespace mapping is enabled, client-side namespace mapping is enabled and system tables
-    // are to be mapped to the SYSTEM namespace.
-    // Expected: If this is the first connection to the server, we should be able to create all namespace mapped system tables i.e. SYSTEM:.*
-    private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionNSMappingServerEnabledClientEnabled()
+    // Conditions: server-side namespace mapping is enabled, client-side namespace mapping is
+    // enabled and system tables are to be mapped to the SYSTEM namespace.
+    // Expected: If this is the first connection to the server, we should be able to create all
+    // namespace mapped system tables i.e. SYSTEM:.*
+    private PhoenixSysCatCreationTestingDriver firstConnNSMappingServerEnabledClientEnabled()
     throws Exception {
         startMiniClusterWithToggleNamespaceMapping(Boolean.TRUE.toString());
         Properties clientProps = getClientProperties(true, true);
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
+        PhoenixSysCatCreationTestingDriver driver =
+                new PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
         driver.getConnectionQueryServices(getJdbcUrl(), clientProps);
         hbaseTables = getHBaseTables();
         assertEquals(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES, hbaseTables);
@@ -581,16 +656,18 @@ public class SystemCatalogCreationOnConnectionIT {
         return driver;
     }
 
-    // Conditions: server-side namespace mapping is enabled, client-side namespace mapping is enabled, but mapping
-    // SYSTEM tables to the SYSTEM namespace is disabled
-    // Expected: If this is the first connection to the server, we will create unmapped SYSTEM tables i.e. SYSTEM\..*
-    private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionNSMappingServerEnabledClientEnabledMappingDisabled()
-    throws Exception {
+    // Conditions: server-side namespace mapping is enabled, client-side namespace mapping is
+    // enabled, but mapping SYSTEM tables to the SYSTEM namespace is disabled
+    // Expected: If this is the first connection to the server, we will create unmapped
+    // SYSTEM tables i.e. SYSTEM\..*
+    private PhoenixSysCatCreationTestingDriver
+    firstConnNSMappingServerEnabledClientEnabledMappingDisabled() throws Exception {
         startMiniClusterWithToggleNamespaceMapping(Boolean.TRUE.toString());
-        // client-side namespace mapping is enabled, but mapping SYSTEM tables to SYSTEM namespace is not
+        // client-side namespace mapping is enabled, but mapping SYSTEM tables to SYSTEM namespace
+        // is not
         Properties clientProps = getClientProperties(true, false);
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
+        PhoenixSysCatCreationTestingDriver driver =
+                new PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
         driver.getConnectionQueryServices(getJdbcUrl(), clientProps);
         hbaseTables = getHBaseTables();
         assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables);
@@ -599,20 +676,23 @@ public class SystemCatalogCreationOnConnectionIT {
         return driver;
     }
 
-    // Conditions: server-side namespace mapping is enabled, client-side namespace mapping is disabled
+    // Conditions: server-side namespace mapping is enabled, client-side namespace mapping is
+    // disabled
     // Expected: Since this is the first connection to the server, we will immediately
     // throw an exception for inconsistent namespace mapping without creating any SYSTEM tables
-    private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionNSMappingServerEnabledClientDisabled()
-    throws Exception {
+    private PhoenixSysCatCreationTestingDriver firstConnNSMappingServerEnabledClientDisabled()
+            throws Exception {
         startMiniClusterWithToggleNamespaceMapping(Boolean.TRUE.toString());
         Properties clientProps = getClientProperties(false, false);
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
+        PhoenixSysCatCreationTestingDriver driver =
+                new PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
         try {
             driver.getConnectionQueryServices(getJdbcUrl(), clientProps);
-            fail("Client should not be able to connect to cluster with 
inconsistent client-server namespace mapping properties");
+            fail("Client should not be able to connect to cluster with 
inconsistent client-server "
+                    + "namespace mapping properties");
         } catch (SQLException sqlE) {
-            
assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(),
 sqlE.getErrorCode());
+            
assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(),
+                    sqlE.getErrorCode());
         }
         hbaseTables = getHBaseTables();
         assertEquals(0, hbaseTables.size());
@@ -620,20 +700,24 @@ public class SystemCatalogCreationOnConnectionIT {
         return driver;
     }
 
-    // Conditions: server-side namespace mapping is disabled, client-side namespace mapping is enabled
-    // Expected: Since this is the first connection to the server, we will immediately throw an exception for
-    // inconsistent namespace mapping without creating any SYSTEM tables or SYSTEM namespace
-    private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionNSMappingServerDisabledClientEnabled()
-    throws Exception {
+    // Conditions: server-side namespace mapping is disabled, client-side namespace mapping is
+    // enabled
+    // Expected: Since this is the first connection to the server, we will immediately throw an
+    // exception for inconsistent namespace mapping without creating any SYSTEM tables or
+    // SYSTEM namespace
+    private PhoenixSysCatCreationTestingDriver firstConnNSMappingServerDisabledClientEnabled()
+            throws Exception {
         startMiniClusterWithToggleNamespaceMapping(Boolean.FALSE.toString());
         Properties clientProps = getClientProperties(true, true);
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
+        PhoenixSysCatCreationTestingDriver driver =
+                new PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
         try {
             driver.getConnectionQueryServices(getJdbcUrl(), clientProps);
-            fail("Client should not be able to connect to cluster with 
inconsistent client-server namespace mapping properties");
+            fail("Client should not be able to connect to cluster with 
inconsistent client-server "
+                    + "namespace mapping properties");
         } catch (SQLException sqlE) {
-            
assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(),
 sqlE.getErrorCode());
+            
assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(),
+                    sqlE.getErrorCode());
         }
         hbaseTables = getHBaseTables();
         assertEquals(0, hbaseTables.size());
@@ -642,15 +726,16 @@ public class SystemCatalogCreationOnConnectionIT {
         return driver;
     }
 
-    // Conditions: server-side namespace mapping is disabled, client-side namespace mapping is disabled
-    // Expected: Since this is the first connection to the server and auto-upgrade is enabled by default,
-    // we will create all SYSTEM\..* tables
-    private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionNSMappingServerDisabledClientDisabled()
-    throws Exception {
+    // Conditions: server-side namespace mapping is disabled, client-side namespace mapping is
+    // disabled
+    // Expected: Since this is the first connection to the server and auto-upgrade is enabled
+    // by default, we will create all SYSTEM\..* tables
+    private PhoenixSysCatCreationTestingDriver firstConnNSMappingServerDisabledClientDisabled()
+            throws Exception {
         startMiniClusterWithToggleNamespaceMapping(Boolean.FALSE.toString());
         Properties clientProps = getClientProperties(false, false);
-        SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
-          new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
+        PhoenixSysCatCreationTestingDriver driver =
+                new PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
         driver.getConnectionQueryServices(getJdbcUrl(), clientProps);
         hbaseTables = getHBaseTables();
         assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 69ad053..792091c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -214,6 +214,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
     public static final String TASK_PRIORITY = "TASK_PRIORITY";
     public static final String TASK_DATA = "TASK_DATA";
     public static final String TASK_TABLE_TTL = "864000";
+    public static final int TTL_FOR_MUTEX = 15 * 60; // 15min
     public static final String ARRAY_SIZE = "ARRAY_SIZE";
     public static final byte[] ARRAY_SIZE_BYTES = Bytes.toBytes(ARRAY_SIZE);
     public static final String VIEW_CONSTANT = "VIEW_CONSTANT";
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index ea0acf0..a3f6ee2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -48,6 +48,10 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAM
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_TASK_TABLE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
@@ -55,6 +59,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TASK_TABLE_TTL;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTIONAL;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_FOR_MUTEX;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HCONNECTIONS_COUNTER;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_PHOENIX_CONNECTIONS_THROTTLED_COUNTER;
@@ -299,7 +304,6 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
     private static final int INITIAL_CHILD_SERVICES_CAPACITY = 100;
     private static final int DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS = 1000;
-    private static final int TTL_FOR_MUTEX = 15 * 60; // 15min
     private final GuidePostsCacheProvider
             GUIDE_POSTS_CACHE_PROVIDER = new GuidePostsCacheProvider();
     protected final Configuration config;
@@ -3113,7 +3117,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     protected String getMutexDDL() {
-        return setSystemDDLProperties(QueryConstants.CREATE_MUTEX_METADTA);
+        return setSystemDDLProperties(QueryConstants.CREATE_MUTEX_METADATA);
     }
 
     protected String getTaskDDL() {
@@ -3269,19 +3273,15 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         }
     }
 
-    void createSysMutexTableIfNotExists(Admin admin) throws IOException, SQLException {
+    void createSysMutexTableIfNotExists(Admin admin) throws IOException {
         try {
-            if (admin.tableExists(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME))
-                    || admin.tableExists(TableName.valueOf(
-                        PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME,
-                        PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME))) {
-                LOGGER.debug("System mutex table already appears to exist, not creating it");
+            if (checkIfSysMutexExistsAndModifyTTLIfRequired(admin)) {
                 return;
             }
             final TableName mutexTableName = SchemaUtil.getPhysicalTableName(
-                    PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME, props);
+                    SYSTEM_MUTEX_NAME, props);
             TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(mutexTableName)
-                    .addColumnFamily(ColumnFamilyDescriptorBuilder
+                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                             .newBuilder(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES)
                             .setTimeToLive(TTL_FOR_MUTEX).build())
                     .build();
@@ -3302,6 +3302,48 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         }
     }
 
+    /**
+     * Check if the SYSTEM MUTEX table exists. If it does, ensure that its TTL is correct and if
+     * not, modify its table descriptor
+     * @param admin HBase admin
+     * @return true if SYSTEM MUTEX exists already and false if it needs to be created
+     * @throws IOException thrown if there is an error getting the table descriptor
+     */
+    @VisibleForTesting
+    boolean checkIfSysMutexExistsAndModifyTTLIfRequired(Admin admin) throws IOException {
+        TableDescriptor htd;
+        try {
+            htd = admin.getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME));
+        } catch (org.apache.hadoop.hbase.TableNotFoundException ignored) {
+            try {
+                // Try with the namespace mapping name
+                htd = admin.getDescriptor(TableName.valueOf(SYSTEM_SCHEMA_NAME,
+                        SYSTEM_MUTEX_TABLE_NAME));
+            } catch (org.apache.hadoop.hbase.TableNotFoundException ignored2) {
+                return false;
+            }
+        }
+
+        // The SYSTEM MUTEX table already exists so check its TTL
+        if (htd.getColumnFamily(SYSTEM_MUTEX_FAMILY_NAME_BYTES).getTimeToLive() != TTL_FOR_MUTEX) {
+            LOGGER.debug("SYSTEM MUTEX already appears to exist, but has the wrong TTL. " +
+                    "Will modify the TTL");
+            ColumnFamilyDescriptor hColFamDesc = ColumnFamilyDescriptorBuilder
+                    .newBuilder(htd.getColumnFamily(SYSTEM_MUTEX_FAMILY_NAME_BYTES))
+                    .setTimeToLive(TTL_FOR_MUTEX)
+                    .build();
+            htd = TableDescriptorBuilder
+                    .newBuilder(htd)
+                    .modifyColumnFamily(hColFamDesc)
+                    .build();
+            admin.modifyTable(htd);
+        } else {
+            LOGGER.debug("SYSTEM MUTEX already appears to exist with the correct TTL, " +
+                    "not creating it");
+        }
+        return true;
+    }
+
     private boolean inspectIfAnyExceptionInChain(Throwable io, List<Class<? extends Exception>> ioList) {
         boolean exceptionToIgnore = false;
         for (Throwable t : Throwables.getCausalChain(io)) {
@@ -3346,13 +3388,13 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         } catch (TableAlreadyExistsException ignore) {}
         try {
             metaConnection.createStatement().executeUpdate(getChildLinkDDL());
-        } catch (TableAlreadyExistsException e) {}
+        } catch (TableAlreadyExistsException ignore) {}
         try {
             metaConnection.createStatement().executeUpdate(getMutexDDL());
-        } catch (TableAlreadyExistsException e) {}
+        } catch (TableAlreadyExistsException ignore) {}
         try {
             metaConnection.createStatement().executeUpdate(getTaskDDL());
-        } catch (TableAlreadyExistsException e) {}
+        } catch (TableAlreadyExistsException ignore) {}
 
         // Catch the IOException to log the error message and then bubble it up for the client to retry.
     }
@@ -3924,7 +3966,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 // The COLUMN_FAMILY column should be nullable as we create a row in it without
                 // any column family to mark when guideposts were last collected.
                 metaConnection = removeNotNullConstraint(metaConnection,
-                        PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME,
+                        SYSTEM_SCHEMA_NAME,
                         PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE,
                         MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0,
                         PhoenixDatabaseMetaData.COLUMN_FAMILY);
@@ -4329,8 +4371,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             if(admin.tableExists(PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME)) {
                 sysMutexPhysicalTableNameBytes = PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES;
             } else if (admin.tableExists(TableName.valueOf(
-                    SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME, props).getName()))) {
-                sysMutexPhysicalTableNameBytes = SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME, props).getName();
+                    SchemaUtil.getPhysicalTableName(SYSTEM_MUTEX_NAME, props).getName()))) {
+                sysMutexPhysicalTableNameBytes = SchemaUtil.getPhysicalTableName(SYSTEM_MUTEX_NAME, props).getName();
             }
         }
         return sysMutexPhysicalTableNameBytes;
@@ -4445,7 +4487,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             .executeUpdate("DELETE FROM " + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " WHERE "
                     + PhoenixDatabaseMetaData.TABLE_NAME + "='" + PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE
                     + "' AND " + PhoenixDatabaseMetaData.TABLE_SCHEM + "='"
-                    + PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME + "'");
+                    + SYSTEM_SCHEMA_NAME + "'");
         } catch (SQLException e) {
             LOGGER.warn("exception during upgrading stats table:" + e);
             sqlE = e;
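The net effect of the ConnectionQueryServicesImpl changes above is that a connecting client no longer skips SYSTEM.MUTEX just because the table already exists: it now also reconciles the mutex column family's TTL with TTL_FOR_MUTEX (900 seconds), modifying the table descriptor when they differ. The standalone sketch below is not part of the patch; it shows one way an operator could confirm the resulting TTL with the plain HBase 2.x client API. The physical names "SYSTEM.MUTEX" / "SYSTEM:MUTEX" and the column family name "0" are illustrative assumptions here, not values taken from this commit.

    // Hypothetical verification sketch (not from the commit); assumes an HBase 2.x
    // client and an hbase-site.xml on the classpath.
    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.TableNotFoundException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SysMutexTtlCheck {
        public static void main(String[] args) throws IOException {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                    Admin admin = conn.getAdmin()) {
                TableDescriptor htd;
                try {
                    // Default (non namespace-mapped) physical name
                    htd = admin.getDescriptor(TableName.valueOf("SYSTEM.MUTEX"));
                } catch (TableNotFoundException e) {
                    // Namespace-mapped clusters store the table as SYSTEM:MUTEX
                    htd = admin.getDescriptor(TableName.valueOf("SYSTEM", "MUTEX"));
                }
                // "0" is assumed to be the mutex column family for this illustration
                ColumnFamilyDescriptor cf = htd.getColumnFamily(Bytes.toBytes("0"));
                System.out.println("SYSTEM.MUTEX TTL (seconds): " + cf.getTimeToLive());
            }
        }
    }

A finite TTL on the mutex family helps ensure that stale mutex cells left behind by an interrupted client eventually age out instead of lingering indefinitely, which is the behaviour this patch restores for clusters bootstrapped by a 4.15+ client.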
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index acdb15e..45329de 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -189,7 +189,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
     
     protected String getMutexDDL() {
-        return setSystemDDLProperties(QueryConstants.CREATE_MUTEX_METADTA);
+        return setSystemDDLProperties(QueryConstants.CREATE_MUTEX_METADATA);
     }
 
     protected String getTaskDDL() {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 3cbe924..a158acc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -18,8 +18,6 @@
 package org.apache.phoenix.query;
 
 
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.*;
-
 import java.math.BigDecimal;
 
 import org.apache.hadoop.hbase.HConstants;
@@ -28,7 +26,6 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.monitoring.MetricType;
 import org.apache.phoenix.schema.MetaDataSplitPolicy;
 import org.apache.phoenix.schema.PName;
@@ -40,6 +37,125 @@ import org.apache.phoenix.schema.SystemFunctionSplitPolicy;
 import org.apache.phoenix.schema.SystemStatsSplitPolicy;
 import org.apache.phoenix.schema.TableProperty;
 
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARG_POSITION;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.BASE_COLUMN_COUNT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.BIND_PARAMETERS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.BUFFER_LENGTH;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CACHE_SIZE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHAR_OCTET_LENGTH;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLASS_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLIENT_IP;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_DEF;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CURRENT_VALUE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CYCLE_FLAG;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TABLE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DECIMAL_DIGITS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_VALUE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DISABLE_WAL;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODING_SCHEME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.EXCEPTION_TRACE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.EXPLAIN_PLAN;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FUNCTION_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GLOBAL_SCAN_DETAILS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POST_KEY;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_ROWS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INCREMENT_BY;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_STATE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_ARRAY;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_AUTOINCREMENT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_CONSTANT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_NULLABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_ROW_TIMESTAMP;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_VIEW_REFERENCED;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.JAR_PATH;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.KEY_SEQ;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_VALUE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NO_OF_RESULTS_ITERATED;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NULLABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_ARGS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_PREC_RADIX;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHOENIX_TTL;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHOENIX_TTL_HWM;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHYSICAL_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PK_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.QUERY;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.QUERY_ID;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.QUERY_STATUS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.REF_GENERATION;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.REMARKS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.RETURN_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCAN_METRICS_JSON;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCOPE_CATALOG;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCOPE_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCOPE_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SELF_REFERENCING_COL_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SEQUENCE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SEQUENCE_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SOURCE_DATA_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SQL_DATA_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SQL_DATETIME_SUB;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.START_TIME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.START_WITH;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STORE_NULLS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_LOG_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_TASK_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TASK_DATA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TASK_END_TS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TASK_PRIORITY;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TASK_STATUS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TASK_TABLE_TTL;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TASK_TS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TASK_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTIONAL;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTION_PROVIDER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_FOR_MUTEX;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE;
 
 /**
  *
@@ -49,131 +165,122 @@ import org.apache.phoenix.schema.TableProperty;
  * @since 0.1
  */
 public interface QueryConstants {
-    public static final String NAME_SEPARATOR = ".";
-    public static final String NAMESPACE_SEPARATOR = ":";
-    public static final String CHILD_VIEW_INDEX_NAME_SEPARATOR = "#";
-    public static final byte[] NAMESPACE_SEPARATOR_BYTES = 
Bytes.toBytes(NAMESPACE_SEPARATOR);
-    public static final byte NAMESPACE_SEPARATOR_BYTE = 
NAMESPACE_SEPARATOR_BYTES[0];
-    public static final String NAME_SEPARATOR_REGEX = "\\" + NAME_SEPARATOR;
-    public final static byte[] NAME_SEPARATOR_BYTES = 
Bytes.toBytes(NAME_SEPARATOR);
-    public static final byte NAME_SEPARATOR_BYTE = NAME_SEPARATOR_BYTES[0];
-    public static final String NULL_SCHEMA_NAME = "";
-    public static final String NULL_DISPLAY_TEXT = "<null>";
-    public static final long UNSET_TIMESTAMP = -1;
+    String NAME_SEPARATOR = ".";
+    String NAMESPACE_SEPARATOR = ":";
+    String CHILD_VIEW_INDEX_NAME_SEPARATOR = "#";
+    byte[] NAMESPACE_SEPARATOR_BYTES = Bytes.toBytes(NAMESPACE_SEPARATOR);
+    byte NAMESPACE_SEPARATOR_BYTE = NAMESPACE_SEPARATOR_BYTES[0];
+    String NAME_SEPARATOR_REGEX = "\\" + NAME_SEPARATOR;
+    byte[] NAME_SEPARATOR_BYTES = Bytes.toBytes(NAME_SEPARATOR);
+    byte NAME_SEPARATOR_BYTE = NAME_SEPARATOR_BYTES[0];
+    String NULL_DISPLAY_TEXT = "<null>";
+    long UNSET_TIMESTAMP = -1;
 
-    public enum JoinType {INNER, LEFT_OUTER}
-    public final static String SYSTEM_SCHEMA_NAME = "SYSTEM";
-    public final static byte[] SYSTEM_SCHEMA_NAME_BYTES = 
Bytes.toBytes(SYSTEM_SCHEMA_NAME);
-    public final static String HBASE_DEFAULT_SCHEMA_NAME = "default";
-    public final static String PHOENIX_METADATA = "table";
-    public final static String OFFSET_ROW_KEY = "_OFFSET_";
-    public final static byte[] OFFSET_ROW_KEY_BYTES = 
Bytes.toBytes(OFFSET_ROW_KEY);
-    public final static ImmutableBytesPtr OFFSET_ROW_KEY_PTR = new 
ImmutableBytesPtr(OFFSET_ROW_KEY_BYTES);
+    enum JoinType {INNER, LEFT_OUTER}
+    String SYSTEM_SCHEMA_NAME = "SYSTEM";
+    byte[] SYSTEM_SCHEMA_NAME_BYTES = Bytes.toBytes(SYSTEM_SCHEMA_NAME);
+    String HBASE_DEFAULT_SCHEMA_NAME = "default";
+    String OFFSET_ROW_KEY = "_OFFSET_";
+    byte[] OFFSET_ROW_KEY_BYTES = Bytes.toBytes(OFFSET_ROW_KEY);
+    ImmutableBytesPtr OFFSET_ROW_KEY_PTR = new 
ImmutableBytesPtr(OFFSET_ROW_KEY_BYTES);
 
-    public static final long AGG_TIMESTAMP = HConstants.LATEST_TIMESTAMP;
+    long AGG_TIMESTAMP = HConstants.LATEST_TIMESTAMP;
     /**
      * Key used for a single row aggregation where there is no group by
      */
-    public final static byte[] UNGROUPED_AGG_ROW_KEY = Bytes.toBytes("a");
+    byte[] UNGROUPED_AGG_ROW_KEY = Bytes.toBytes("a");
     
     /** BEGIN Set of reserved column qualifiers **/
     
-    public static final String RESERVED_COLUMN_FAMILY = "_v";
-    public static final byte[] RESERVED_COLUMN_FAMILY_BYTES = 
Bytes.toBytes(RESERVED_COLUMN_FAMILY);
+    String RESERVED_COLUMN_FAMILY = "_v";
+    byte[] RESERVED_COLUMN_FAMILY_BYTES = 
Bytes.toBytes(RESERVED_COLUMN_FAMILY);
     
-    public static final byte[] VALUE_COLUMN_FAMILY = 
RESERVED_COLUMN_FAMILY_BYTES;
-    public static final byte[] VALUE_COLUMN_QUALIFIER = 
QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode(1);
+    byte[] VALUE_COLUMN_FAMILY = RESERVED_COLUMN_FAMILY_BYTES;
+    byte[] VALUE_COLUMN_QUALIFIER = 
QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode(1);
     
-    public static final byte[] ARRAY_VALUE_COLUMN_FAMILY = 
RESERVED_COLUMN_FAMILY_BYTES;
-    public static final byte[] ARRAY_VALUE_COLUMN_QUALIFIER = 
QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode(2);
+    byte[] ARRAY_VALUE_COLUMN_FAMILY = RESERVED_COLUMN_FAMILY_BYTES;
+    byte[] ARRAY_VALUE_COLUMN_QUALIFIER = 
QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode(2);
     
-    public final static PName SINGLE_COLUMN_NAME = 
PNameFactory.newNormalizedName("s");
-    public final static PName SINGLE_COLUMN_FAMILY_NAME = 
PNameFactory.newNormalizedName("s");
-    public final static byte[] SINGLE_COLUMN = SINGLE_COLUMN_NAME.getBytes();
-    public final static byte[] SINGLE_COLUMN_FAMILY = 
SINGLE_COLUMN_FAMILY_NAME.getBytes();
+    PName SINGLE_COLUMN_NAME = PNameFactory.newNormalizedName("s");
+    PName SINGLE_COLUMN_FAMILY_NAME = PNameFactory.newNormalizedName("s");
+    byte[] SINGLE_COLUMN = SINGLE_COLUMN_NAME.getBytes();
+    byte[] SINGLE_COLUMN_FAMILY = SINGLE_COLUMN_FAMILY_NAME.getBytes();
 
     /** END Set of reserved column qualifiers **/
     
-    public static final byte[] TRUE = new byte[] {1};
+    byte[] TRUE = new byte[] {1};
     
     /**
      * The priority property for an hbase table. This is already in HTD, but 
older versions of
      * HBase do not have this, so we re-defined it here. Once Phoenix is 
HBase-1.3+, we can remote.
      */
-    public static final String PRIORITY = "PRIORITY";
+    String PRIORITY = "PRIORITY";
 
     /**
      * Separator used between variable length keys for a composite key.
      * Variable length data types may not use this byte value.
      */
-    public static final byte SEPARATOR_BYTE = (byte) 0;
-    public static final byte[] SEPARATOR_BYTE_ARRAY = new byte[] 
{SEPARATOR_BYTE};
-    public static final byte DESC_SEPARATOR_BYTE = 
SortOrder.invert(SEPARATOR_BYTE);
-    public static final byte[] DESC_SEPARATOR_BYTE_ARRAY = new byte[] 
{DESC_SEPARATOR_BYTE};
+    byte SEPARATOR_BYTE = (byte) 0;
+    byte[] SEPARATOR_BYTE_ARRAY = new byte[] {SEPARATOR_BYTE};
+    byte DESC_SEPARATOR_BYTE = SortOrder.invert(SEPARATOR_BYTE);
+    byte[] DESC_SEPARATOR_BYTE_ARRAY = new byte[] {DESC_SEPARATOR_BYTE};
 
-    public static final String DEFAULT_COPROCESS_PATH = "phoenix.jar";
-    public static final String DEFAULT_COPROCESS_JAR_NAME = 
"phoenix-[version]-server.jar";
+    String DEFAULT_COPROCESS_JAR_NAME = "phoenix-[version]-server.jar";
     
-    public final static int MILLIS_IN_DAY = 1000 * 60 * 60 * 24;
-
-    public static final String EMPTY_COLUMN_NAME = "_0";
+    int MILLIS_IN_DAY = 1000 * 60 * 60 * 24;
+    String EMPTY_COLUMN_NAME = "_0";
     // For transactional tables, the value of our empty key value can no 
longer be empty
     // since empty values are treated as column delete markers.
-    public static final byte[] EMPTY_COLUMN_BYTES = 
Bytes.toBytes(EMPTY_COLUMN_NAME);
-    public static final ImmutableBytesPtr EMPTY_COLUMN_BYTES_PTR = new 
ImmutableBytesPtr(
-            EMPTY_COLUMN_BYTES);
-    public static final Integer ENCODED_EMPTY_COLUMN_NAME = 0;
-    public static final byte[] ENCODED_EMPTY_COLUMN_BYTES = 
QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode(ENCODED_EMPTY_COLUMN_NAME);
-    public final static String EMPTY_COLUMN_VALUE = "x";
-    public final static byte[] EMPTY_COLUMN_VALUE_BYTES = 
Bytes.toBytes(EMPTY_COLUMN_VALUE);
-    public static final ImmutableBytesPtr EMPTY_COLUMN_VALUE_BYTES_PTR = new 
ImmutableBytesPtr(
+    byte[] EMPTY_COLUMN_BYTES = Bytes.toBytes(EMPTY_COLUMN_NAME);
+    ImmutableBytesPtr EMPTY_COLUMN_BYTES_PTR = new 
ImmutableBytesPtr(EMPTY_COLUMN_BYTES);
+    Integer ENCODED_EMPTY_COLUMN_NAME = 0;
+    byte[] ENCODED_EMPTY_COLUMN_BYTES = 
QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode(
+            ENCODED_EMPTY_COLUMN_NAME);
+    String EMPTY_COLUMN_VALUE = "x";
+    byte[] EMPTY_COLUMN_VALUE_BYTES = Bytes.toBytes(EMPTY_COLUMN_VALUE);
+    ImmutableBytesPtr EMPTY_COLUMN_VALUE_BYTES_PTR = new ImmutableBytesPtr(
             EMPTY_COLUMN_VALUE_BYTES);
-    public static final String ENCODED_EMPTY_COLUMN_VALUE = EMPTY_COLUMN_VALUE;
-    public final static byte[] ENCODED_EMPTY_COLUMN_VALUE_BYTES = 
Bytes.toBytes(EMPTY_COLUMN_VALUE);
-    public static final ImmutableBytesPtr ENCODED_EMPTY_COLUMN_VALUE_BYTES_PTR 
= new ImmutableBytesPtr(
-            ENCODED_EMPTY_COLUMN_VALUE_BYTES);
-    public static final String DEFAULT_COLUMN_FAMILY = "0";
-    public static final byte[] DEFAULT_COLUMN_FAMILY_BYTES = 
Bytes.toBytes(DEFAULT_COLUMN_FAMILY);
-    public static final ImmutableBytesPtr DEFAULT_COLUMN_FAMILY_BYTES_PTR = 
new ImmutableBytesPtr(
+    byte[] ENCODED_EMPTY_COLUMN_VALUE_BYTES = 
Bytes.toBytes(EMPTY_COLUMN_VALUE);
+    String DEFAULT_COLUMN_FAMILY = "0";
+    byte[] DEFAULT_COLUMN_FAMILY_BYTES = Bytes.toBytes(DEFAULT_COLUMN_FAMILY);
+    ImmutableBytesPtr DEFAULT_COLUMN_FAMILY_BYTES_PTR = new ImmutableBytesPtr(
             DEFAULT_COLUMN_FAMILY_BYTES);
-    // column qualifier of the single key value used to store all columns for 
the COLUMNS_STORED_IN_SINGLE_CELL storage scheme
-    public static final String SINGLE_KEYVALUE_COLUMN_QUALIFIER = "1";
-    public final static byte[] SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES = 
Bytes.toBytes(SINGLE_KEYVALUE_COLUMN_QUALIFIER);
-    public static final ImmutableBytesPtr 
SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES_PTR = new ImmutableBytesPtr(
+    // column qualifier of the single key value used to store all columns for 
the
+    // COLUMNS_STORED_IN_SINGLE_CELL storage scheme
+    String SINGLE_KEYVALUE_COLUMN_QUALIFIER = "1";
+    byte[] SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES = 
Bytes.toBytes(SINGLE_KEYVALUE_COLUMN_QUALIFIER);
+    ImmutableBytesPtr SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES_PTR = new 
ImmutableBytesPtr(
             SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES);
 
-    public static final String LOCAL_INDEX_COLUMN_FAMILY_PREFIX = "L#";
-    public static final byte[] LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES = 
Bytes.toBytes(LOCAL_INDEX_COLUMN_FAMILY_PREFIX);
-    public static final ImmutableBytesPtr LOCAL_INDEX_COLUMN_FAMILY_PREFIX_PTR 
= new ImmutableBytesPtr(
-        LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES);
-    
-    public static final String DEFAULT_LOCAL_INDEX_COLUMN_FAMILY = 
LOCAL_INDEX_COLUMN_FAMILY_PREFIX + DEFAULT_COLUMN_FAMILY;
-    public static final byte[] DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES = 
Bytes.toBytes(DEFAULT_LOCAL_INDEX_COLUMN_FAMILY);
-    public static final ImmutableBytesPtr 
DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES_PTR = new ImmutableBytesPtr(
+    String LOCAL_INDEX_COLUMN_FAMILY_PREFIX = "L#";
+    byte[] LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES = 
Bytes.toBytes(LOCAL_INDEX_COLUMN_FAMILY_PREFIX);
+
+    String DEFAULT_LOCAL_INDEX_COLUMN_FAMILY = 
LOCAL_INDEX_COLUMN_FAMILY_PREFIX +
+            DEFAULT_COLUMN_FAMILY;
+    byte[] DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES = Bytes.toBytes(
+            DEFAULT_LOCAL_INDEX_COLUMN_FAMILY);
+    ImmutableBytesPtr DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES_PTR = new 
ImmutableBytesPtr(
                DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES);
 
-    public static final String GLOBAL_INDEX_VERIFIED_COLUMN_QUALIFIER = 
EMPTY_COLUMN_NAME;
-    public static final byte[] GLOBAL_INDEX_VERIFIED_COLUMN_NAME_BYTES = 
Bytes.toBytes(GLOBAL_INDEX_VERIFIED_COLUMN_QUALIFIER);
-            ;
-    public static final String ALL_FAMILY_PROPERTIES_KEY = "";
-    public static final String SYSTEM_TABLE_PK_NAME = "pk";
+    String ALL_FAMILY_PROPERTIES_KEY = "";
+    String SYSTEM_TABLE_PK_NAME = "pk";
 
-    public static final double MILLIS_TO_NANOS_CONVERTOR = Math.pow(10, 6);
-    public static final BigDecimal BD_MILLIS_NANOS_CONVERSION = 
BigDecimal.valueOf(MILLIS_TO_NANOS_CONVERTOR);
-    public static final BigDecimal BD_MILLIS_IN_DAY = 
BigDecimal.valueOf(QueryConstants.MILLIS_IN_DAY);
-    public static final int MAX_ALLOWED_NANOS = 999999999;
-    public static final int NANOS_IN_SECOND = BigDecimal.valueOf(Math.pow(10, 
9)).intValue();
-    public static final int DIVERGED_VIEW_BASE_COLUMN_COUNT = -100;
-    public static final int BASE_TABLE_BASE_COLUMN_COUNT = -1;
+    double MILLIS_TO_NANOS_CONVERTOR = Math.pow(10, 6);
+    BigDecimal BD_MILLIS_NANOS_CONVERSION = 
BigDecimal.valueOf(MILLIS_TO_NANOS_CONVERTOR);
+    BigDecimal BD_MILLIS_IN_DAY = 
BigDecimal.valueOf(QueryConstants.MILLIS_IN_DAY);
+    int MAX_ALLOWED_NANOS = 999999999;
+    int DIVERGED_VIEW_BASE_COLUMN_COUNT = -100;
+    int BASE_TABLE_BASE_COLUMN_COUNT = -1;
 
     // custom TagType
-    public static final byte VIEW_MODIFIED_PROPERTY_TAG_TYPE = (byte) 70;
+    byte VIEW_MODIFIED_PROPERTY_TAG_TYPE = (byte) 70;
     /**
-     * We mark counter values 0 to 10 as reserved. Value 0 is used by {@link 
#ENCODED_EMPTY_COLUMN_NAME}. Values 1-10
+     * We mark counter values 0 to 10 as reserved. Value 0 is used by
+     * {@link #ENCODED_EMPTY_COLUMN_NAME}. Values 1-10
      * are reserved for special column qualifiers returned by Phoenix 
co-processors.
      */
-    public static final int ENCODED_CQ_COUNTER_INITIAL_VALUE = 11;
-    public static final String CREATE_TABLE_METADATA =
+    int ENCODED_CQ_COUNTER_INITIAL_VALUE = 11;
+    String CREATE_TABLE_METADATA =
             // Do not use IF NOT EXISTS as we sometimes catch the 
TableAlreadyExists
             // exception and add columns to the SYSTEM.TABLE dynamically.
             "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_CATALOG_TABLE + "\"(\n" +
@@ -253,10 +360,11 @@ public interface QueryConstants {
             + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + 
COLUMN_FAMILY + "))\n" +
             HConstants.VERSIONS + "=%s,\n" +
             ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" +
-            TableDescriptorBuilder.SPLIT_POLICY + "='" + 
MetaDataSplitPolicy.class.getName() + "',\n" + 
-            PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
+            // Install split policy to prevent a tenant's metadata from being 
split across regions.
+            TableDescriptorBuilder.SPLIT_POLICY + "='" + 
MetaDataSplitPolicy.class.getName() +
+            "',\n" + TRANSACTIONAL + "=" + Boolean.FALSE;
 
-    public static final String CREATE_STATS_TABLE_METADATA =
+    String CREATE_STATS_TABLE_METADATA =
             "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_STATS_TABLE + "\"(\n" +
             // PK columns
             PHYSICAL_NAME  + " VARCHAR NOT NULL," +
@@ -268,11 +376,12 @@ public interface QueryConstants {
             "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY ("
             + PHYSICAL_NAME + ","
             + COLUMN_FAMILY + ","+ GUIDE_POST_KEY+"))\n" +
-            // Install split policy to prevent a physical table's stats from 
being split across regions.
+            // Install split policy to prevent a physical table's stats from 
being split
+            // across regions.
             TableDescriptorBuilder.SPLIT_POLICY + "='" + 
SystemStatsSplitPolicy.class.getName() + "',\n" + 
-            PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
+            TRANSACTIONAL + "=" + Boolean.FALSE;
 
-    public static final String CREATE_SEQUENCE_METADATA =
+    String CREATE_SEQUENCE_METADATA =
             "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + TYPE_SEQUENCE + 
"\"(\n" +
             TENANT_ID + " VARCHAR NULL," +
             SEQUENCE_SCHEMA + " VARCHAR NULL, \n" +
@@ -286,14 +395,15 @@ public interface QueryConstants {
             MAX_VALUE + " BIGINT, \n" +
             CYCLE_FLAG + " BOOLEAN, \n" +
             LIMIT_REACHED_FLAG + " BOOLEAN \n" +
-            " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + 
TENANT_ID + "," + SEQUENCE_SCHEMA + "," + SEQUENCE_NAME + "))\n" +
+            " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" +
+            TENANT_ID + "," + SEQUENCE_SCHEMA + "," + SEQUENCE_NAME + "))\n" +
             HConstants.VERSIONS + "=%s,\n" +
             ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n"+
-            PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
-    public static final String CREATE_SYSTEM_SCHEMA = "CREATE SCHEMA " + 
SYSTEM_CATALOG_SCHEMA;
-    public static final String UPGRADE_TABLE_SNAPSHOT_PREFIX = 
"_UPGRADING_TABLE_";
+            TRANSACTIONAL + "=" + Boolean.FALSE;
+
+    String UPGRADE_TABLE_SNAPSHOT_PREFIX = "_UPGRADING_TABLE_";
 
-    public static final String CREATE_FUNCTION_METADATA =
+    String CREATE_FUNCTION_METADATA =
             "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_FUNCTION_TABLE + "\"(\n" +
              // Pk columns
             TENANT_ID + " VARCHAR NULL," +
@@ -311,16 +421,17 @@ public interface QueryConstants {
             DEFAULT_VALUE + " VARCHAR, \n" +
             MIN_VALUE + " VARCHAR, \n" +
             MAX_VALUE + " VARCHAR, \n" +
-            " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + 
TENANT_ID + ", " + FUNCTION_NAME + ", " + TYPE + ", " + ARG_POSITION + "))\n" +
+            " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + 
TENANT_ID + ", " +
+            FUNCTION_NAME + ", " + TYPE + ", " + ARG_POSITION + "))\n" +
             HConstants.VERSIONS + "=%s,\n" +
             ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n"+
             // Install split policy to prevent a tenant's metadata from being 
split across regions.
-            TableDescriptorBuilder.SPLIT_POLICY + "='" + 
SystemFunctionSplitPolicy.class.getName() + "',\n" + 
-            PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
-    
-    public static final String CREATE_LOG_METADATA =
+            TableDescriptorBuilder.SPLIT_POLICY + "='" + 
SystemFunctionSplitPolicy.class.getName() +
+            "',\n" + TRANSACTIONAL + "=" + Boolean.FALSE;
+
+    String CREATE_LOG_METADATA =
             "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_LOG_TABLE + "\"(\n" +
-             // Pk columns
+            // Pk columns
             START_TIME + " DECIMAL, \n" +
             TABLE_NAME + " VARCHAR, \n" +
             QUERY_ID + " VARCHAR NOT NULL,\n" +
@@ -338,23 +449,24 @@ public interface QueryConstants {
             BIND_PARAMETERS + " VARCHAR, \n" +
             SCAN_METRICS_JSON + " VARCHAR, \n" +
             MetricType.getMetricColumnsDetails()+"\n"+
-            " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (START_TIME, 
TABLE_NAME, QUERY_ID))\n" +
-            PhoenixDatabaseMetaData.SALT_BUCKETS + "=%s,\n"+
-            PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE+ ",\n" 
+
+            " CONSTRAINT " + SYSTEM_TABLE_PK_NAME +
+            " PRIMARY KEY (START_TIME, TABLE_NAME, QUERY_ID))\n" +
+            SALT_BUCKETS + "=%s,\n"+
+            TRANSACTIONAL + "=" + Boolean.FALSE+ ",\n" +
             ColumnFamilyDescriptorBuilder.TTL + "=" + 
MetaDataProtocol.DEFAULT_LOG_TTL+",\n"+
-            TableProperty.IMMUTABLE_STORAGE_SCHEME.toString() + " = " + 
ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS.name() + ",\n" +
+            TableProperty.IMMUTABLE_STORAGE_SCHEME.toString() + " = " +
+            ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS.name() + 
",\n" +
             TableProperty.COLUMN_ENCODED_BYTES.toString()+" = 1";
     
-    public static final byte[] OFFSET_FAMILY = "f_offset".getBytes();
-    public static final byte[] OFFSET_COLUMN = "c_offset".getBytes();
-    public static final String LAST_SCAN = "LAST_SCAN";
-    public static final byte[] UPGRADE_MUTEX = "UPGRADE_MUTEX".getBytes();
-    public static final String HASH_JOIN_CACHE_RETRIES = 
"hashjoin.client.retries.number";
-    public static final int DEFAULT_HASH_JOIN_CACHE_RETRIES = 5;
+    byte[] OFFSET_FAMILY = "f_offset".getBytes();
+    byte[] OFFSET_COLUMN = "c_offset".getBytes();
+    String LAST_SCAN = "LAST_SCAN";
+    String HASH_JOIN_CACHE_RETRIES = "hashjoin.client.retries.number";
+    int DEFAULT_HASH_JOIN_CACHE_RETRIES = 5;
     
        // Links from parent to child views are stored in a separate table for
        // scalability
-       public static final String CREATE_CHILD_LINK_METADATA = "CREATE TABLE " 
+ SYSTEM_CATALOG_SCHEMA + ".\"" +
+       String CREATE_CHILD_LINK_METADATA = "CREATE TABLE " + 
SYSTEM_CATALOG_SCHEMA + ".\"" +
             SYSTEM_CHILD_LINK_TABLE + "\"(\n" +
                        // PK columns
                        TENANT_ID + " VARCHAR NULL," +
@@ -363,27 +475,30 @@ public interface QueryConstants {
             COLUMN_NAME + " VARCHAR NULL," +
             COLUMN_FAMILY + " VARCHAR NULL," +
             LINK_TYPE + " UNSIGNED_TINYINT,\n" +
-            "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + 
TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," +
+            "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" +
+            TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," +
             COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" +
             HConstants.VERSIONS + "=%s,\n" +
             ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" +
-            PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
-       
-       public static final String CREATE_MUTEX_METADTA =
-            "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_MUTEX_TABLE_NAME + "\"(\n" +
-             // Pk columns
+            TRANSACTIONAL + "=" + Boolean.FALSE;
+
+    String CREATE_MUTEX_METADATA =
+            "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" +
+            SYSTEM_MUTEX_TABLE_NAME + "\"(\n" +
+            // Pk columns
             TENANT_ID + " VARCHAR NULL," +
             TABLE_SCHEM + " VARCHAR NULL," +
-               TABLE_NAME + " VARCHAR NOT NULL," +
-               COLUMN_NAME + " VARCHAR NULL," + // null for table row
-               COLUMN_FAMILY + " VARCHAR NULL " + // using for CF to uniqueness for columns
-               "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + ","
-               + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" +
-               HConstants.VERSIONS + "=%s,\n" +
-               ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" +
-               PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
+            TABLE_NAME + " VARCHAR NOT NULL," +
+            COLUMN_NAME + " VARCHAR NULL," + // null for table row
+            COLUMN_FAMILY + " VARCHAR NULL " + // using for CF to uniqueness for columns
+            "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + "," +
+            TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" +
+            HConstants.VERSIONS + "=%s,\n" +
+            ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" +
+            TRANSACTIONAL + "=" + Boolean.FALSE + ",\n" +
+            ColumnFamilyDescriptorBuilder.TTL + "=" + TTL_FOR_MUTEX;
 
-       public static final String CREATE_TASK_METADATA =
+       String CREATE_TASK_METADATA =
             "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_TASK_TABLE + "\"(\n" +
             // PK columns
             TASK_TYPE + " UNSIGNED_TINYINT NOT NULL," +
@@ -396,11 +511,12 @@ public interface QueryConstants {
             TASK_END_TS + " TIMESTAMP NULL," +
             TASK_PRIORITY + " UNSIGNED_TINYINT NULL," +
             TASK_DATA + " VARCHAR NULL,\n" +
-            "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + 
TASK_TYPE + "," + TASK_TS + " ROW_TIMESTAMP," + TENANT_ID + "," + TABLE_SCHEM + 
"," +
+            "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" +
+            TASK_TYPE + "," + TASK_TS + " ROW_TIMESTAMP," + TENANT_ID + "," + 
TABLE_SCHEM + "," +
             TABLE_NAME + "))\n" +
             HConstants.VERSIONS + "=%s,\n" +
             ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" +
             ColumnFamilyDescriptorBuilder.TTL + "=" + TASK_TABLE_TTL + ",\n" + 
    // 10 days
-            PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE + 
",\n" +
+            TRANSACTIONAL + "=" + Boolean.FALSE + ",\n" +
             STORE_NULLS + "=" + Boolean.TRUE;
 }
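Note that CREATE_MUTEX_METADATA, like the other system DDL templates above, still carries %s placeholders for HConstants.VERSIONS and KEEP_DELETED_CELLS, which the query services layer appears to substitute before the statement is executed, while the mutex TTL is now baked directly into the template. The sketch below is illustrative only; the literal values 1 and false are assumptions, not necessarily what setSystemDDLProperties passes in.

    // Hypothetical illustration: prints a fully formatted SYSTEM.MUTEX DDL string.
    import org.apache.phoenix.query.QueryConstants;

    public class MutexDdlPreview {
        public static void main(String[] args) {
            // Two %s placeholders: HBase VERSIONS and KEEP_DELETED_CELLS (values assumed).
            String ddl = String.format(QueryConstants.CREATE_MUTEX_METADATA, 1, false);
            System.out.println(ddl);
        }
    }

Whatever property values are substituted, the formatted statement now ends with TTL=900, so a SYSTEM.MUTEX created through the Phoenix DDL path carries the same 15-minute TTL as one created directly through the HBase Admin API.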
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
index a183907..dc3b7b8 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
@@ -16,62 +16,137 @@
  */
 package org.apache.phoenix.query;
 
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_FOR_MUTEX;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.doCallRealMethod;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
+import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.phoenix.exception.PhoenixIOException;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
 
 public class ConnectionQueryServicesImplTest {
-    private static final PhoenixIOException PHOENIX_IO_EXCEPTION = new PhoenixIOException(new Exception("Test exception"));
+    private static final PhoenixIOException PHOENIX_IO_EXCEPTION =
+            new PhoenixIOException(new Exception("Test exception"));
+    private TableDescriptor sysMutexTableDescCorrectTTL = TableDescriptorBuilder
+            .newBuilder(TableName.valueOf(SYSTEM_MUTEX_NAME))
+            .setColumnFamily(ColumnFamilyDescriptorBuilder
+                    .newBuilder(SYSTEM_MUTEX_FAMILY_NAME_BYTES)
+                    .setTimeToLive(TTL_FOR_MUTEX)
+                    .build())
+            .build();
+
+    @Mock
+    private ConnectionQueryServicesImpl mockCqs;
+    @Mock
+    private Admin mockAdmin;
+
+    @Before
+    public void reset() throws IOException {
+        MockitoAnnotations.initMocks(this);
+        when(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin))
+                .thenCallRealMethod();
+    }
 
     @SuppressWarnings("unchecked")
     @Test
     public void testExceptionHandlingOnSystemNamespaceCreation() throws Exception {
-        ConnectionQueryServicesImpl cqs = mock(ConnectionQueryServicesImpl.class);
         // Invoke the real methods for these two calls
-        when(cqs.createSchema(any(List.class), anyString())).thenCallRealMethod();
-        doCallRealMethod().when(cqs).ensureSystemTablesMigratedToSystemNamespace();
+        when(mockCqs.createSchema(any(List.class), anyString())).thenCallRealMethod();
+        doCallRealMethod().when(mockCqs).ensureSystemTablesMigratedToSystemNamespace();
         // Do nothing for this method, just check that it was invoked later
-        doNothing().when(cqs).createSysMutexTableIfNotExists(any(Admin.class));
+        doNothing().when(mockCqs).createSysMutexTableIfNotExists(any(Admin.class));
 
         // Spoof out this call so that ensureSystemTablesUpgrade() will return-fast.
-        when(cqs.getSystemTableNamesInDefaultNamespace(any(Admin.class))).thenReturn(Collections.<TableName> emptyList());
+        when(mockCqs.getSystemTableNamesInDefaultNamespace(any(Admin.class)))
+                .thenReturn(Collections.<TableName> emptyList());
 
         // Throw a special exception to check on later
-        doThrow(PHOENIX_IO_EXCEPTION).when(cqs).ensureNamespaceCreated(anyString());
+        doThrow(PHOENIX_IO_EXCEPTION).when(mockCqs).ensureNamespaceCreated(anyString());
 
-        // Make sure that ensureSystemTablesMigratedToSystemNamespace will try to migrate the system tables.
+        // Make sure that ensureSystemTablesMigratedToSystemNamespace will try to migrate
+        // the system tables.
         Map<String,String> props = new HashMap<>();
         props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
-        when(cqs.getProps()).thenReturn(new ReadOnlyProps(props));
-        cqs.ensureSystemTablesMigratedToSystemNamespace();
+        when(mockCqs.getProps()).thenReturn(new ReadOnlyProps(props));
+        mockCqs.ensureSystemTablesMigratedToSystemNamespace();
 
         // Should be called after upgradeSystemTables()
         // Proves that execution proceeded
-        verify(cqs).getSystemTableNamesInDefaultNamespace(any(Admin.class));
+        verify(mockCqs).getSystemTableNamesInDefaultNamespace(any(Admin.class));
 
         try {
             // Verifies that the exception is propagated back to the caller
-            cqs.createSchema(Collections.<Mutation> emptyList(), "");
+            mockCqs.createSchema(Collections.<Mutation> emptyList(), "");
         } catch (PhoenixIOException e) {
             assertEquals(PHOENIX_IO_EXCEPTION, e);
         }
     }
+
+    @Test
+    public void testSysMutexCheckReturnsFalseWhenTableAbsent() throws Exception {
+        // Override the getDescriptor() call to throw instead
+        doThrow(new TableNotFoundException())
+                .when(mockAdmin)
+                .getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME));
+        doThrow(new TableNotFoundException())
+                .when(mockAdmin)
+                .getDescriptor(TableName.valueOf(SYSTEM_SCHEMA_NAME, SYSTEM_MUTEX_TABLE_NAME));
+        assertFalse(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin));
+    }
+
+    @Test
+    public void testSysMutexCheckModifiesTTLWhenWrong() throws Exception {
+        // Set the wrong TTL
+        TableDescriptor sysMutexTableDescWrongTTL = TableDescriptorBuilder
+                .newBuilder(TableName.valueOf(SYSTEM_MUTEX_NAME))
+                .setColumnFamily(ColumnFamilyDescriptorBuilder
+                        .newBuilder(SYSTEM_MUTEX_FAMILY_NAME_BYTES)
+                        .setTimeToLive(HConstants.FOREVER)
+                        .build())
+                .build();
+        when(mockAdmin.getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME)))
+                .thenReturn(sysMutexTableDescWrongTTL);
+
+        assertTrue(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin));
+        verify(mockAdmin, Mockito.times(1)).modifyTable(sysMutexTableDescCorrectTTL);
+    }
+
+    @Test
+    public void testSysMutexCheckDoesNotModifyTableDescWhenTTLCorrect() throws Exception {
+        when(mockAdmin.getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME)))
+                .thenReturn(sysMutexTableDescCorrectTTL);
+
+        assertTrue(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin));
+        verify(mockAdmin, Mockito.times(0)).modifyTable(any(TableDescriptor.class));
+    }
 }
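The three new unit tests above cover the missing-table, wrong-TTL, and correct-TTL cases, all through the default physical name. A fourth case worth exercising is the namespace-mapped branch of checkIfSysMutexExistsAndModifyTTLIfRequired(), where the unmapped name is absent but SYSTEM:MUTEX exists. A hypothetical test along the lines below (not part of this commit, and reusing the mocks and static imports already defined in this class) would cover that path:

    @Test
    public void testSysMutexCheckFallsBackToNamespaceMappedName() throws Exception {
        // Unmapped physical name is missing...
        doThrow(new TableNotFoundException())
                .when(mockAdmin)
                .getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME));
        // ...but the namespace-mapped table exists with the expected TTL
        when(mockAdmin.getDescriptor(TableName.valueOf(SYSTEM_SCHEMA_NAME, SYSTEM_MUTEX_TABLE_NAME)))
                .thenReturn(sysMutexTableDescCorrectTTL);

        assertTrue(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin));
        verify(mockAdmin, Mockito.times(0)).modifyTable(any(TableDescriptor.class));
    }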
