PHOENIX-4579 Add a config to conditionally create Phoenix meta tables on first client connection (Chinmay Kulkarni)
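In short, this change lets a client connect without implicitly creating or upgrading the Phoenix SYSTEM tables: when the new client-side "do not upgrade" property is set (UpgradeUtil.doNotUpgradeOnFirstConnection) or auto-upgrade (QueryServices.AUTO_UPGRADE_ENABLED) is disabled, the first connection fails with UpgradeRequiredException instead of creating SYSTEM.CATALOG, and the tables are created later via an explicit EXECUTE UPGRADE. Below is a minimal client-side sketch of that flow; it is illustrative only and not part of the commit — the JDBC URL is a placeholder, and the tests in this patch drive EXECUTE UPGRADE through the same ConnectionQueryServices instance rather than through DriverManager as shown here.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;

import org.apache.phoenix.exception.UpgradeRequiredException;
import org.apache.phoenix.util.UpgradeUtil;

public class SysCatCreationOnConnectionSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; point this at your ZooKeeper quorum.
        String url = "jdbc:phoenix:localhost:2181:/hbase";

        Properties props = new Properties();
        // Ask the client not to create or upgrade SYSTEM tables as a side effect of connecting.
        UpgradeUtil.doNotUpgradeOnFirstConnection(props);

        try (Connection conn = DriverManager.getConnection(url, props)) {
            // Reached only if SYSTEM.CATALOG already exists at the expected version.
        } catch (SQLException e) {
            if (e instanceof UpgradeRequiredException) {
                // SYSTEM.CATALOG is missing or out of date; upgrade explicitly.
                try (Connection adminConn = DriverManager.getConnection(url, new Properties())) {
                    adminConn.createStatement().execute("EXECUTE UPGRADE");
                }
            } else {
                throw e;
            }
        }
    }
}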
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8cda8141 Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8cda8141 Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8cda8141 Branch: refs/heads/4.x-cdh5.14 Commit: 8cda81411294ead4b5642fca842c9c56a15e8652 Parents: f71654a Author: James Taylor <jtay...@salesforce.com> Authored: Fri Apr 13 18:30:30 2018 +0100 Committer: Pedro Boado <pbo...@apache.org> Committed: Fri Apr 13 23:31:28 2018 +0100 ---------------------------------------------------------------------- .../phoenix/end2end/AppendOnlySchemaIT.java | 2 +- .../MigrateSystemTablesToSystemNamespaceIT.java | 9 +- .../SystemCatalogCreationOnConnectionIT.java | 626 ++++++++++++++++ .../coprocessor/MetaDataEndpointImpl.java | 21 + .../phoenix/coprocessor/MetaDataProtocol.java | 4 + .../coprocessor/generated/MetaDataProtos.java | 183 +++-- .../exception/UpgradeRequiredException.java | 13 +- .../phoenix/query/ConnectionQueryServices.java | 2 +- .../query/ConnectionQueryServicesImpl.java | 750 +++++++++++-------- .../query/ConnectionlessQueryServicesImpl.java | 2 +- .../query/DelegateConnectionQueryServices.java | 4 +- .../apache/phoenix/schema/MetaDataClient.java | 3 +- .../query/ConnectionQueryServicesImplTest.java | 7 +- phoenix-protocol/src/main/MetaDataService.proto | 1 + 14 files changed, 1236 insertions(+), 391 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/phoenix/blob/8cda8141/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java index 7ed64ff..d601beb 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java @@ -114,7 +114,7 @@ public class AppendOnlySchemaIT extends ParallelStatsDisabledIT { // verify no create table rpcs verify(connectionQueryServices, never()).createTable(anyListOf(Mutation.class), any(byte[].class), any(PTableType.class), anyMap(), anyList(), any(byte[][].class), - eq(false), eq(false)); + eq(false), eq(false), eq(false)); reset(connectionQueryServices); // execute alter table ddl that adds the same column http://git-wip-us.apache.org/repos/asf/phoenix/blob/8cda8141/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java index d20ffa9..b0f1d5f 100644 --- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java @@ -377,8 +377,10 @@ public class MigrateSystemTablesToSystemNamespaceIT extends BaseTest { while(rs.next()) { if(rs.getString("IS_NAMESPACE_MAPPED") == null) { + // Check that entry for SYSTEM namespace exists in SYSCAT systemSchemaExists = rs.getString("TABLE_SCHEM").equals(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME) ? 
true : systemSchemaExists; } else if (rs.getString("COLUMN_NAME") == null) { + // Found the initial entry for a table in SYSCAT String schemaName = rs.getString("TABLE_SCHEM"); String tableName = rs.getString("TABLE_NAME"); @@ -396,12 +398,11 @@ public class MigrateSystemTablesToSystemNamespaceIT extends BaseTest { } } - if(!systemSchemaExists) { - fail(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME + " entry doesn't exist in SYSTEM.CATALOG table."); - } - // The set will contain SYSMUTEX table since that table is not exposed in SYSCAT if (systemTablesMapped) { + if (!systemSchemaExists) { + fail(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME + " entry doesn't exist in SYSTEM.CATALOG table."); + } assertTrue(namespaceMappedSystemTablesSet.size() == 1); } else { assertTrue(systemTablesSet.size() == 1); http://git-wip-us.apache.org/repos/asf/phoenix/blob/8cda8141/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java new file mode 100644 index 0000000..689eb20 --- /dev/null +++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java @@ -0,0 +1,626 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.phoenix.end2end; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.phoenix.coprocessor.MetaDataProtocol; +import org.apache.phoenix.exception.SQLExceptionCode; +import org.apache.phoenix.exception.UpgradeRequiredException; +import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver; +import org.apache.phoenix.jdbc.PhoenixTestDriver; +import org.apache.phoenix.query.*; +import org.apache.phoenix.util.ReadOnlyProps; +import org.apache.phoenix.util.UpgradeUtil; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.*; +import java.util.concurrent.TimeoutException; + +import static org.junit.Assert.*; + +@Category(NeedsOwnMiniClusterTest.class) +public class SystemCatalogCreationOnConnectionIT { + private HBaseTestingUtility testUtil = null; + private Set<String> hbaseTables; + private static boolean setOldTimestampToInduceUpgrade = false; + private static int countUpgradeAttempts; + // This flag is used to figure out if the SYSCAT schema was actually upgraded or not, based on the timestamp of SYSCAT + // (different from an upgrade attempt) + private static int actualSysCatUpgrades; + private static final String PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG = "SYSTEM:CATALOG"; + private static final String PHOENIX_SYSTEM_CATALOG = "SYSTEM.CATALOG"; + private static final String EXECUTE_UPGRADE_COMMAND = "EXECUTE UPGRADE"; + private static final String MODIFIED_MAX_VERSIONS ="5"; + + private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList( + "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION", + "SYSTEM.MUTEX", "SYSTEM.LOG")); + + private static final Set<String> PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES = new HashSet<>( + Arrays.asList("SYSTEM:CATALOG", "SYSTEM:SEQUENCE", "SYSTEM:STATS", "SYSTEM:FUNCTION", + "SYSTEM:MUTEX", "SYSTEM:LOG")); + + private static class PhoenixSysCatCreationServices extends ConnectionQueryServicesImpl { + + public PhoenixSysCatCreationServices(QueryServices services, PhoenixEmbeddedDriver.ConnectionInfo connectionInfo, Properties info) { + super(services, connectionInfo, info); + } + + @Override + protected void setUpgradeRequired() { + super.setUpgradeRequired(); + countUpgradeAttempts++; + } + + @Override + protected long getSystemTableVersion() { + if (setOldTimestampToInduceUpgrade) { + // Return the next lower version where an upgrade was performed to induce setting the upgradeRequired flag + return MetaDataProtocol.getPriorUpgradeVersion(); + } + return MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP; + } + + @Override + protected PhoenixConnection upgradeSystemCatalogIfRequired(PhoenixConnection metaConnection, + long currentServerSideTableTimeStamp) throws InterruptedException, SQLException, TimeoutException, IOException { + PhoenixConnection newMetaConnection = super.upgradeSystemCatalogIfRequired(metaConnection, currentServerSideTableTimeStamp); + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP) { + actualSysCatUpgrades++; + } + return newMetaConnection; + } + } + + public static class PhoenixSysCatCreationTestingDriver extends 
PhoenixTestDriver { + private ConnectionQueryServices cqs; + private final ReadOnlyProps overrideProps; + + public PhoenixSysCatCreationTestingDriver(ReadOnlyProps props) { + overrideProps = props; + } + + @Override // public for testing + public synchronized ConnectionQueryServices getConnectionQueryServices(String url, Properties info) throws SQLException { + if (cqs == null) { + cqs = new PhoenixSysCatCreationServices(new QueryServicesTestImpl(getDefaultProps(), overrideProps), ConnectionInfo.create(url), info); + cqs.init(url, info); + } + return cqs; + } + + // NOTE: Do not use this if you want to try re-establishing a connection from the client using a previously + // used ConnectionQueryServices instance. This is used only in cases where we need to test server-side + // changes and don't care about client-side properties set from the init method. + // Reset the Connection Query Services instance so we can create a new connection to the cluster + public void resetCQS() { + cqs = null; + } + } + + + @Before + public void resetVariables() { + setOldTimestampToInduceUpgrade = false; + countUpgradeAttempts = 0; + actualSysCatUpgrades = 0; + } + + @After + public void tearDownMiniCluster() { + try { + if (testUtil != null) { + testUtil.shutdownMiniCluster(); + testUtil = null; + } + } catch (Exception e) { + // ignore + } + } + + + // Conditions: isDoNotUpgradePropSet is true + // Expected: We do not create SYSTEM.CATALOG even if this is the first connection to the server + @Test + public void testFirstConnectionDoNotUpgradePropSet() throws Exception { + startMiniClusterWithToggleNamespaceMapping(Boolean.FALSE.toString()); + Properties propsDoNotUpgradePropSet = new Properties(); + // Set doNotUpgradeProperty to true + UpgradeUtil.doNotUpgradeOnFirstConnection(propsDoNotUpgradePropSet); + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS); + try { + driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet); + fail("Client should not be able to create SYSTEM.CATALOG since we set the doNotUpgrade property"); + } catch (Exception e) { + assertTrue(e instanceof UpgradeRequiredException); + } + hbaseTables = getHBaseTables(); + assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) || hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG)); + assertTrue(hbaseTables.size() == 0); + assertEquals(1, countUpgradeAttempts); + } + + + /********************* Testing SYSTEM.CATALOG/SYSTEM:CATALOG creation/upgrade behavior for subsequent connections *********************/ + + + // Conditions: server-side namespace mapping is enabled, the first connection to the server will create unmapped + // SYSTEM tables i.e. 
SYSTEM\..*, the second connection has client-side namespace mapping enabled and + // system table to system namespace mapping enabled + // Expected: We will migrate all SYSTEM\..* tables to the SYSTEM namespace + @Test + public void testMigrateToSystemNamespace() throws Exception { + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + firstConnectionNSMappingServerEnabledClientEnabledMappingDisabled(); + driver.resetCQS(); + // Setting this to true to effect migration of SYSTEM tables to the SYSTEM namespace + Properties clientProps = getClientProperties(true, true); + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + hbaseTables = getHBaseTables(); + assertEquals(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES, hbaseTables); + assertEquals(1, countUpgradeAttempts); + } + + // Conditions: server-side namespace mapping is enabled, the first connection to the server will create all namespace + // mapped SYSTEM tables i.e. SYSTEM:.*, the SYSTEM:CATALOG timestamp at creation is purposefully set to be < + // MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP. The subsequent connection has client-side namespace mapping enabled + // Expected: An upgrade is attempted when the second client connects to the server + @Test + public void testUpgradeAttempted() throws Exception { + setOldTimestampToInduceUpgrade = true; + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + firstConnectionNSMappingServerEnabledClientEnabled(); + driver.resetCQS(); + Properties clientProps = getClientProperties(true, true); + setOldTimestampToInduceUpgrade = false; + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + // There should be no new tables + assertEquals(hbaseTables, getHBaseTables()); + // Since we set an old timestamp on purpose when creating SYSTEM:CATALOG, the second connection attempts to upgrade it + assertEquals(1, countUpgradeAttempts); + assertEquals(1, actualSysCatUpgrades); + } + + // Conditions: server-side namespace mapping is enabled, the first connection to the server will create all namespace + // mapped SYSTEM tables i.e. SYSTEM:.*, the SYSTEM:CATALOG timestamp at creation is purposefully set to be < + // MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP. The subsequent connection has client-side namespace mapping enabled + // Expected: An upgrade is attempted when the second client connects to the server, but this fails since the + // isDoNotUpgradePropSet is set to true. 
We later run EXECUTE UPGRADE manually + @Test + public void testUpgradeNotAllowed() throws Exception { + setOldTimestampToInduceUpgrade = true; + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + firstConnectionNSMappingServerEnabledClientEnabled(); + driver.resetCQS(); + Properties clientProps = getClientProperties(true, true); + UpgradeUtil.doNotUpgradeOnFirstConnection(clientProps); + setOldTimestampToInduceUpgrade = false; + try { + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + } catch (Exception e) { + assertTrue(e instanceof UpgradeRequiredException); + } + // There should be no new tables + assertEquals(hbaseTables, getHBaseTables()); + // Since we set an old timestamp on purpose when creating SYSTEM:CATALOG, the second connection attempts to upgrade it + assertEquals(1, countUpgradeAttempts); + // This connection is unable to actually upgrade SYSTEM:CATALOG due to isDoNotUpgradePropSet + assertEquals(0, actualSysCatUpgrades); + Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), new Properties()).connect(getJdbcUrl(), new Properties()); + try { + conn.createStatement().execute(EXECUTE_UPGRADE_COMMAND); + // Actually upgraded SYSTEM:CATALOG + assertEquals(1, actualSysCatUpgrades); + } finally { + conn.close(); + } + } + + // Conditions: server-side namespace mapping is enabled, the first connection to the server will create only SYSTEM.CATALOG, + // the second connection has client-side namespace mapping enabled + // Expected: We will migrate SYSTEM.CATALOG to SYSTEM namespace and create all other SYSTEM:.* tables + @Test + public void testMigrateSysCatCreateOthers() throws Exception { + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + firstConnectionNSMappingServerEnabledClientDisabled(); + driver.resetCQS(); + Properties clientProps = getClientProperties(true, true); + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + hbaseTables = getHBaseTables(); + assertEquals(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES, hbaseTables); + // SYSTEM.CATALOG migration to the SYSTEM namespace is counted as an upgrade + assertEquals(1, countUpgradeAttempts); + } + + // Conditions: server-side namespace mapping is enabled, the first connection to the server will create unmapped SYSTEM + // tables SYSTEM\..* whose timestamp at creation is purposefully set to be < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP. + // The second connection has client-side namespace mapping enabled and system table to system namespace mapping enabled + // Expected: We will migrate all SYSTEM\..* tables to the SYSTEM namespace and also upgrade SYSTEM:CATALOG + @Test + public void testMigrateToSystemNamespaceAndUpgradeSysCat() throws Exception { + setOldTimestampToInduceUpgrade = true; + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + firstConnectionNSMappingServerEnabledClientEnabledMappingDisabled(); + driver.resetCQS(); + setOldTimestampToInduceUpgrade = false; + Properties clientProps = getClientProperties(true, true); + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + hbaseTables = getHBaseTables(); + assertEquals(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES, hbaseTables); + assertEquals(1, countUpgradeAttempts); + assertEquals(1, actualSysCatUpgrades); + } + + // Conditions: server-side namespace mapping is enabled, the first connection to the server will create all namespace + // mapped SYSTEM tables i.e. 
SYSTEM:.*, the second connection has client-side namespace mapping disabled + // Expected: Throw Inconsistent namespace mapping exception from ensureTableCreated + @Test + public void testTablesExistInconsistentNSMappingFails() throws Exception { + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + firstConnectionNSMappingServerEnabledClientEnabled(); + driver.resetCQS(); + Properties clientProps = getClientProperties(false, false); + try { + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + fail("Client should not be able to connect to cluster with inconsistent client-server namespace mapping properties"); + } catch (SQLException sqlE) { + assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(), sqlE.getErrorCode()); + } + hbaseTables = getHBaseTables(); + assertEquals(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES, hbaseTables); + assertEquals(0, countUpgradeAttempts); + } + + // Conditions: server-side namespace mapping is enabled, the first connection to the server will create only SYSTEM.CATALOG, + // the second connection has client-side namespace mapping disabled + // Expected: Throw Inconsistent namespace mapping exception when you check client-server compatibility + @Test + public void testUnmappedSysCatExistsInconsistentNSMappingFails() throws Exception { + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + firstConnectionNSMappingServerEnabledClientDisabled(); + driver.resetCQS(); + Properties clientProps = getClientProperties(false, false); + try { + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + fail("Client should not be able to connect to cluster with inconsistent client-server namespace mapping properties"); + } catch (SQLException sqlE) { + assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(), sqlE.getErrorCode()); + } + hbaseTables = getHBaseTables(); + assertTrue(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG)); + assertTrue(hbaseTables.size() == 1); + assertEquals(0, countUpgradeAttempts); + } + + // Conditions: server-side namespace mapping is disabled, the first connection to the server will create all unmapped + // SYSTEM tables i.e. 
SYSTEM\..*, the second connection has client-side namespace mapping enabled + // Expected: Throw Inconsistent namespace mapping exception when you check client-server compatibility + @Test + public void testSysTablesExistInconsistentNSMappingFails() throws Exception { + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + firstConnectionNSMappingServerDisabledClientDisabled(); + driver.resetCQS(); + Properties clientProps = getClientProperties(true, true); + try { + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + fail("Client should not be able to connect to cluster with inconsistent client-server namespace mapping properties"); + } catch (SQLException sqlE) { + assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(), sqlE.getErrorCode()); + } + hbaseTables = getHBaseTables(); + assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables); + assertEquals(0, countUpgradeAttempts); + } + + // Conditions: server-side namespace mapping is disabled, the first connection to the server will create only SYSTEM:CATALOG + // and the second connection has client-side namespace mapping enabled + // Expected: Throw Inconsistent namespace mapping exception when you check client-server compatibility + @Test + public void testMappedSysCatExistsInconsistentNSMappingFails() throws Exception { + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + firstConnectionNSMappingServerDisabledClientEnabled(); + driver.resetCQS(); + Properties clientProps = getClientProperties(true, true); + try{ + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + fail("Client should not be able to connect to cluster with inconsistent client-server namespace mapping properties"); + } catch (SQLException sqlE) { + assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(), sqlE.getErrorCode()); + } + hbaseTables = getHBaseTables(); + assertTrue(hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG)); + assertTrue(hbaseTables.size() == 1); + assertEquals(0, countUpgradeAttempts); + } + + // Conditions: server-side namespace mapping is disabled, the first connection to the server will create all SYSTEM\..* + // tables and the second connection has client-side namespace mapping disabled + // Expected: All SYSTEM\..* tables exist and no upgrade is required + @Test + public void testNSMappingDisabledNoUpgradeRequired() throws Exception { + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + firstConnectionNSMappingServerDisabledClientDisabled(); + driver.resetCQS(); + Properties clientProps = getClientProperties(false, false); + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + hbaseTables = getHBaseTables(); + assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables); + assertEquals(0, countUpgradeAttempts); + } + + // Conditions: server-side namespace mapping is disabled, the first connection to the server will create only SYSTEM:CATALOG + // and the second connection has client-side namespace mapping disabled + // Expected: The second connection should fail with Inconsistent namespace mapping exception + @Test + public void testClientNSMappingDisabledConnectionFails() throws Exception { + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + firstConnectionNSMappingServerDisabledClientEnabled(); + driver.resetCQS(); + Properties clientProps = getClientProperties(false, false); + try{ + driver.getConnectionQueryServices(getJdbcUrl(), 
clientProps); + fail("Client should not be able to connect to cluster with inconsistent client-server namespace mapping properties"); + } catch (SQLException sqlE) { + assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(), sqlE.getErrorCode()); + } + hbaseTables = getHBaseTables(); + assertTrue(hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG)); + assertTrue(hbaseTables.size() == 1); + assertEquals(0, countUpgradeAttempts); + } + + // Conditions: The first connection creates all SYSTEM tables via "EXECUTE UPGRADE" since auto-upgrade is disabled + // and the same client alters HBase metadata for SYSTEM.CATALOG + // Expected: Another client connection (with a new ConnectionQueryServices instance) made to the server does not + // revert the metadata change + @Test + public void testMetadataAlterRemainsAutoUpgradeDisabled() throws Exception { + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = firstConnectionAutoUpgradeToggle(false); + assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), verifyModificationTableMetadata(driver, PHOENIX_SYSTEM_CATALOG)); + } + + // Conditions: The first connection creates all SYSTEM tables (auto-upgrade is enabled) and the same client alters + // HBase metadata for SYSTEM.CATALOG + // Expected: Another client connection (with a new ConnectionQueryServices instance) made to the server does not + // revert the metadata change + @Test + public void testMetadataAlterRemainsAutoUpgradeEnabled() throws Exception { + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = firstConnectionAutoUpgradeToggle(true); + assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), verifyModificationTableMetadata(driver, PHOENIX_SYSTEM_CATALOG)); + } + + /** + * Return all created HBase tables + * @return Set of HBase table name strings + * @throws IOException + */ + private Set<String> getHBaseTables() throws IOException { + Set<String> tables = new HashSet<>(); + for (TableName tn : testUtil.getHBaseAdmin().listTableNames()) { + tables.add(tn.getNameAsString()); + } + return tables; + } + + /** + * Alter the table metadata and return modified value + * @param driver + * @param tableName + * @return value of VERSIONS option for the table + * @throws Exception + */ + private int verifyModificationTableMetadata(PhoenixSysCatCreationTestingDriver driver, String tableName) throws Exception { + // Modify table metadata + Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), new Properties()).connect(getJdbcUrl(), new Properties()); + conn.createStatement().execute("ALTER TABLE " + tableName + " SET VERSIONS = " + MODIFIED_MAX_VERSIONS); + + // Connect via a client that creates a new ConnectionQueryServices instance + driver.resetCQS(); + driver.getConnectionQueryServices(getJdbcUrl(), new Properties()).connect(getJdbcUrl(), new Properties()); + HTableDescriptor descriptor = testUtil.getHBaseAdmin().getTableDescriptor(TableName.valueOf(tableName)); + return descriptor.getFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES).getMaxVersions(); + } + + /** + * Start the mini-cluster with server-side namespace mapping property specified + * @param isNamespaceMappingEnabled + * @throws Exception + */ + private void startMiniClusterWithToggleNamespaceMapping(String isNamespaceMappingEnabled) throws Exception { + testUtil = new HBaseTestingUtility(); + Configuration conf = testUtil.getConfiguration(); + conf.set(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, isNamespaceMappingEnabled); + // 
Avoid multiple clusters trying to bind to the master's info port (16010) + conf.setInt(HConstants.MASTER_INFO_PORT, -1); + testUtil.startMiniCluster(1); + } + + /** + * Get the connection string for the mini-cluster + * @return Phoenix connection string + */ + private String getJdbcUrl() { + return "jdbc:phoenix:localhost:" + testUtil.getZkCluster().getClientPort() + ":/hbase"; + } + + /** + * Set namespace mapping related properties for the client connection + * @param nsMappingEnabled + * @param systemTableMappingEnabled + * @return Properties object + */ + private Properties getClientProperties(boolean nsMappingEnabled, boolean systemTableMappingEnabled) { + Properties clientProps = new Properties(); + clientProps.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.valueOf(nsMappingEnabled).toString()); + clientProps.setProperty(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, Boolean.valueOf(systemTableMappingEnabled).toString()); + return clientProps; + } + + /** + * Initiate the first connection to the server with provided auto-upgrade property + * @param isAutoUpgradeEnabled + * @return Phoenix JDBC driver + * @throws Exception + */ + private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionAutoUpgradeToggle(boolean isAutoUpgradeEnabled) + throws Exception { + if (isAutoUpgradeEnabled) { + return firstConnectionNSMappingServerDisabledClientDisabled(); + } + return firstConnectionAutoUpgradeDisabled(); + } + + // Conditions: isAutoUpgradeEnabled is false + // Expected: We do not create SYSTEM.CATALOG even if this is the first connection to the server. Later, when we manually + // run "EXECUTE UPGRADE", we create SYSTEM tables + private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionAutoUpgradeDisabled() throws Exception { + startMiniClusterWithToggleNamespaceMapping(Boolean.FALSE.toString()); + Map<String, String> props = new HashMap<>(); + // Note that the isAutoUpgradeEnabled property is set when instantiating connection query services, not during init + props.put(QueryServices.AUTO_UPGRADE_ENABLED, Boolean.FALSE.toString()); + ReadOnlyProps readOnlyProps = new ReadOnlyProps(props); + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(readOnlyProps); + try { + driver.getConnectionQueryServices(getJdbcUrl(), new Properties()); + fail("Client should not be able to create SYSTEM.CATALOG since we set the isAutoUpgradeEnabled property to false"); + } catch (Exception e) { + assertTrue(e instanceof UpgradeRequiredException); + } + hbaseTables = getHBaseTables(); + assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) || hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG)); + assertTrue(hbaseTables.size() == 0); + assertEquals(1, countUpgradeAttempts); + + // We use the same ConnectionQueryServices instance to run "EXECUTE UPGRADE" + Connection conn = driver.getConnectionQueryServices(getJdbcUrl(), new Properties()).connect(getJdbcUrl(), new Properties()); + try { + conn.createStatement().execute(EXECUTE_UPGRADE_COMMAND); + } finally { + conn.close(); + } + hbaseTables = getHBaseTables(); + assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables); + return driver; + } + + // Conditions: server-side namespace mapping is enabled, client-side namespace mapping is enabled and system tables + // are to be mapped to the SYSTEM namespace. 
+ // Expected: If this is the first connection to the server, we should be able to create all namespace mapped system tables i.e. SYSTEM:.* + private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionNSMappingServerEnabledClientEnabled() + throws Exception { + startMiniClusterWithToggleNamespaceMapping(Boolean.TRUE.toString()); + Properties clientProps = getClientProperties(true, true); + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS); + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + hbaseTables = getHBaseTables(); + assertEquals(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES, hbaseTables); + assertEquals(0, countUpgradeAttempts); + return driver; + } + + // Conditions: server-side namespace mapping is enabled, client-side namespace mapping is enabled, but mapping + // SYSTEM tables to the SYSTEM namespace is disabled + // Expected: If this is the first connection to the server, we will create unmapped SYSTEM tables i.e. SYSTEM\..* + private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionNSMappingServerEnabledClientEnabledMappingDisabled() + throws Exception { + startMiniClusterWithToggleNamespaceMapping(Boolean.TRUE.toString()); + // client-side namespace mapping is enabled, but mapping SYSTEM tables to SYSTEM namespace is not + Properties clientProps = getClientProperties(true, false); + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS); + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + hbaseTables = getHBaseTables(); + assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables); + assertEquals(0, countUpgradeAttempts); + return driver; + } + + // Conditions: server-side namespace mapping is enabled, client-side namespace mapping is disabled + // Expected: Since this is the first connection to the server, we will create SYSTEM.CATALOG but immediately + // throw an exception for inconsistent namespace mapping + private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionNSMappingServerEnabledClientDisabled() + throws Exception { + startMiniClusterWithToggleNamespaceMapping(Boolean.TRUE.toString()); + Properties clientProps = getClientProperties(false, false); + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS); + try { + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + fail("Client should not be able to connect to cluster with inconsistent client-server namespace mapping properties"); + } catch (SQLException sqlE) { + assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(), sqlE.getErrorCode()); + } + hbaseTables = getHBaseTables(); + assertTrue(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG)); + assertTrue(hbaseTables.size() == 1); + assertEquals(0, countUpgradeAttempts); + return driver; + } + + // Conditions: server-side namespace mapping is disabled, client-side namespace mapping is enabled + // Expected: Since this is the first connection to the server, we will create the SYSTEM namespace and create + // SYSTEM:CATALOG and then immediately throw an exception for inconsistent namespace mapping + private 
SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionNSMappingServerDisabledClientEnabled() + throws Exception { + startMiniClusterWithToggleNamespaceMapping(Boolean.FALSE.toString()); + Properties clientProps = getClientProperties(true, true); + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS); + try { + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + fail("Client should not be able to connect to cluster with inconsistent client-server namespace mapping properties"); + } catch (SQLException sqlE) { + assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(), sqlE.getErrorCode()); + } + hbaseTables = getHBaseTables(); + assertTrue(hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG)); + assertTrue(hbaseTables.size() == 1); + assertEquals(0, countUpgradeAttempts); + return driver; + } + + // Conditions: server-side namespace mapping is disabled, client-side namespace mapping is disabled + // Expected: Since this is the first connection to the server and auto-upgrade is enabled by default, + // we will create all SYSTEM\..* tables + private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver firstConnectionNSMappingServerDisabledClientDisabled() + throws Exception { + startMiniClusterWithToggleNamespaceMapping(Boolean.FALSE.toString()); + Properties clientProps = getClientProperties(false, false); + SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver = + new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS); + driver.getConnectionQueryServices(getJdbcUrl(), clientProps); + hbaseTables = getHBaseTables(); + assertEquals(PHOENIX_SYSTEM_TABLES, hbaseTables); + assertEquals(0, countUpgradeAttempts); + return driver; + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/phoenix/blob/8cda8141/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java index a2d008b..34218d5 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java @@ -3615,6 +3615,27 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso } long version = MetaDataUtil.encodeVersion(env.getHBaseVersion(), config); + PTable systemCatalog = null; + byte[] tableKey = + SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA_BYTES, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES); + ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(tableKey); + try { + systemCatalog = loadTable(env, tableKey, cacheKey, MIN_SYSTEM_TABLE_TIMESTAMP, + HConstants.LATEST_TIMESTAMP, request.getClientVersion()); + } catch (Throwable t) { + logger.error("loading system catalog table inside getVersion failed", t); + ProtobufUtil.setControllerException(controller, + ServerUtil.createIOException( + SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, + isTablesMappingEnabled).toString(), t)); + } + // In case this is the first 
connection, system catalog does not exist, and so we don't + // set the optional system catalog timestamp. + if (systemCatalog != null) { + builder.setSystemCatalogTimestamp(systemCatalog.getTimeStamp()); + } builder.setVersion(version); done.run(builder.build()); } http://git-wip-us.apache.org/repos/asf/phoenix/blob/8cda8141/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java index a71ce0c..26f8198 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java @@ -448,6 +448,10 @@ public abstract class MetaDataProtocol extends MetaDataService { return iterator.next(); } + public static long getPriorUpgradeVersion() { + return TIMESTAMP_VERSION_MAP.lowerKey(TIMESTAMP_VERSION_MAP.lastKey()); + } + public static String getVersion(long serverTimestamp) { /* * It is possible that when clients are trying to run upgrades concurrently, we could be at an intermediate http://git-wip-us.apache.org/repos/asf/phoenix/blob/8cda8141/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java index e041abd..8119c6e 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java @@ -14117,6 +14117,16 @@ public final class MetaDataProtos { * <code>required int64 version = 1;</code> */ long getVersion(); + + // optional int64 systemCatalogTimestamp = 2; + /** + * <code>optional int64 systemCatalogTimestamp = 2;</code> + */ + boolean hasSystemCatalogTimestamp(); + /** + * <code>optional int64 systemCatalogTimestamp = 2;</code> + */ + long getSystemCatalogTimestamp(); } /** * Protobuf type {@code GetVersionResponse} @@ -14174,6 +14184,11 @@ public final class MetaDataProtos { version_ = input.readInt64(); break; } + case 16: { + bitField0_ |= 0x00000002; + systemCatalogTimestamp_ = input.readInt64(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -14230,8 +14245,25 @@ public final class MetaDataProtos { return version_; } + // optional int64 systemCatalogTimestamp = 2; + public static final int SYSTEMCATALOGTIMESTAMP_FIELD_NUMBER = 2; + private long systemCatalogTimestamp_; + /** + * <code>optional int64 systemCatalogTimestamp = 2;</code> + */ + public boolean hasSystemCatalogTimestamp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * <code>optional int64 systemCatalogTimestamp = 2;</code> + */ + public long getSystemCatalogTimestamp() { + return systemCatalogTimestamp_; + } + private void initFields() { version_ = 0L; + systemCatalogTimestamp_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -14252,6 +14284,9 @@ public final class MetaDataProtos { if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, version_); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(2, 
systemCatalogTimestamp_); + } getUnknownFields().writeTo(output); } @@ -14265,6 +14300,10 @@ public final class MetaDataProtos { size += com.google.protobuf.CodedOutputStream .computeInt64Size(1, version_); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, systemCatalogTimestamp_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -14293,6 +14332,11 @@ public final class MetaDataProtos { result = result && (getVersion() == other.getVersion()); } + result = result && (hasSystemCatalogTimestamp() == other.hasSystemCatalogTimestamp()); + if (hasSystemCatalogTimestamp()) { + result = result && (getSystemCatalogTimestamp() + == other.getSystemCatalogTimestamp()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -14310,6 +14354,10 @@ public final class MetaDataProtos { hash = (37 * hash) + VERSION_FIELD_NUMBER; hash = (53 * hash) + hashLong(getVersion()); } + if (hasSystemCatalogTimestamp()) { + hash = (37 * hash) + SYSTEMCATALOGTIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getSystemCatalogTimestamp()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -14421,6 +14469,8 @@ public final class MetaDataProtos { super.clear(); version_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); + systemCatalogTimestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -14453,6 +14503,10 @@ public final class MetaDataProtos { to_bitField0_ |= 0x00000001; } result.version_ = version_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.systemCatalogTimestamp_ = systemCatalogTimestamp_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -14472,6 +14526,9 @@ public final class MetaDataProtos { if (other.hasVersion()) { setVersion(other.getVersion()); } + if (other.hasSystemCatalogTimestamp()) { + setSystemCatalogTimestamp(other.getSystemCatalogTimestamp()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -14536,6 +14593,39 @@ public final class MetaDataProtos { return this; } + // optional int64 systemCatalogTimestamp = 2; + private long systemCatalogTimestamp_ ; + /** + * <code>optional int64 systemCatalogTimestamp = 2;</code> + */ + public boolean hasSystemCatalogTimestamp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * <code>optional int64 systemCatalogTimestamp = 2;</code> + */ + public long getSystemCatalogTimestamp() { + return systemCatalogTimestamp_; + } + /** + * <code>optional int64 systemCatalogTimestamp = 2;</code> + */ + public Builder setSystemCatalogTimestamp(long value) { + bitField0_ |= 0x00000002; + systemCatalogTimestamp_ = value; + onChanged(); + return this; + } + /** + * <code>optional int64 systemCatalogTimestamp = 2;</code> + */ + public Builder clearSystemCatalogTimestamp() { + bitField0_ = (bitField0_ & ~0x00000002); + systemCatalogTimestamp_ = 0L; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:GetVersionResponse) } @@ -17103,51 +17193,52 @@ public final class MetaDataProtos { "cheRequest\022\025\n\rclientVersion\030\001 \001(\005\"*\n\022Cle" + "arCacheResponse\022\024\n\014unfreedBytes\030\001 \001(\003\"*\n" + "\021GetVersionRequest\022\025\n\rclientVersion\030\001 \001(" + - "\005\"%\n\022GetVersionResponse\022\017\n\007version\030\001 \002(\003" + - 
"\"\205\001\n\032ClearTableFromCacheRequest\022\020\n\010tenan", - "tId\030\001 \002(\014\022\022\n\nschemaName\030\002 \002(\014\022\021\n\ttableNa" + - "me\030\003 \002(\014\022\027\n\017clientTimestamp\030\004 \002(\003\022\025\n\rcli" + - "entVersion\030\005 \001(\005\"\035\n\033ClearTableFromCacheR" + - "esponse*\365\004\n\014MutationCode\022\030\n\024TABLE_ALREAD" + - "Y_EXISTS\020\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\024\n\020COLUM" + - "N_NOT_FOUND\020\002\022\031\n\025COLUMN_ALREADY_EXISTS\020\003" + - "\022\035\n\031CONCURRENT_TABLE_MUTATION\020\004\022\027\n\023TABLE" + - "_NOT_IN_REGION\020\005\022\025\n\021NEWER_TABLE_FOUND\020\006\022" + - "\034\n\030UNALLOWED_TABLE_MUTATION\020\007\022\021\n\rNO_PK_C" + - "OLUMNS\020\010\022\032\n\026PARENT_TABLE_NOT_FOUND\020\t\022\033\n\027", - "FUNCTION_ALREADY_EXISTS\020\n\022\026\n\022FUNCTION_NO" + - "T_FOUND\020\013\022\030\n\024NEWER_FUNCTION_FOUND\020\014\022\032\n\026F" + - "UNCTION_NOT_IN_REGION\020\r\022\031\n\025SCHEMA_ALREAD" + - "Y_EXISTS\020\016\022\026\n\022NEWER_SCHEMA_FOUND\020\017\022\024\n\020SC" + - "HEMA_NOT_FOUND\020\020\022\030\n\024SCHEMA_NOT_IN_REGION" + - "\020\021\022\032\n\026TABLES_EXIST_ON_SCHEMA\020\022\022\035\n\031UNALLO" + - "WED_SCHEMA_MUTATION\020\023\022%\n!AUTO_PARTITION_" + - "SEQUENCE_NOT_FOUND\020\024\022#\n\037CANNOT_COERCE_AU" + - "TO_PARTITION_ID\020\025\022\024\n\020TOO_MANY_INDEXES\020\0262" + - "\345\006\n\017MetaDataService\022/\n\010getTable\022\020.GetTab", - "leRequest\032\021.MetaDataResponse\0227\n\014getFunct" + - "ions\022\024.GetFunctionsRequest\032\021.MetaDataRes" + - "ponse\0221\n\tgetSchema\022\021.GetSchemaRequest\032\021." + - "MetaDataResponse\0225\n\013createTable\022\023.Create" + - "TableRequest\032\021.MetaDataResponse\022;\n\016creat" + - "eFunction\022\026.CreateFunctionRequest\032\021.Meta" + - "DataResponse\0227\n\014createSchema\022\024.CreateSch" + - "emaRequest\032\021.MetaDataResponse\0221\n\tdropTab" + - "le\022\021.DropTableRequest\032\021.MetaDataResponse" + - "\0223\n\ndropSchema\022\022.DropSchemaRequest\032\021.Met", - "aDataResponse\0227\n\014dropFunction\022\024.DropFunc" + - "tionRequest\032\021.MetaDataResponse\0221\n\taddCol" + - "umn\022\021.AddColumnRequest\032\021.MetaDataRespons" + - "e\0223\n\ndropColumn\022\022.DropColumnRequest\032\021.Me" + - "taDataResponse\022?\n\020updateIndexState\022\030.Upd" + - "ateIndexStateRequest\032\021.MetaDataResponse\022" + - "5\n\nclearCache\022\022.ClearCacheRequest\032\023.Clea" + - "rCacheResponse\0225\n\ngetVersion\022\022.GetVersio" + - "nRequest\032\023.GetVersionResponse\022P\n\023clearTa" + - "bleFromCache\022\033.ClearTableFromCacheReques", - "t\032\034.ClearTableFromCacheResponseBB\n(org.a" + - "pache.phoenix.coprocessor.generatedB\016Met" + - "aDataProtosH\001\210\001\001\240\001\001" + "\005\"E\n\022GetVersionResponse\022\017\n\007version\030\001 \002(\003" + + "\022\036\n\026systemCatalogTimestamp\030\002 \001(\003\"\205\001\n\032Cle", + "arTableFromCacheRequest\022\020\n\010tenantId\030\001 \002(" + + "\014\022\022\n\nschemaName\030\002 \002(\014\022\021\n\ttableName\030\003 \002(\014" + + "\022\027\n\017clientTimestamp\030\004 \002(\003\022\025\n\rclientVersi" + + "on\030\005 \001(\005\"\035\n\033ClearTableFromCacheResponse*" + + "\365\004\n\014MutationCode\022\030\n\024TABLE_ALREADY_EXISTS" + + "\020\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\024\n\020COLUMN_NOT_FO" + + "UND\020\002\022\031\n\025COLUMN_ALREADY_EXISTS\020\003\022\035\n\031CONC" + + 
"URRENT_TABLE_MUTATION\020\004\022\027\n\023TABLE_NOT_IN_" + + "REGION\020\005\022\025\n\021NEWER_TABLE_FOUND\020\006\022\034\n\030UNALL" + + "OWED_TABLE_MUTATION\020\007\022\021\n\rNO_PK_COLUMNS\020\010", + "\022\032\n\026PARENT_TABLE_NOT_FOUND\020\t\022\033\n\027FUNCTION" + + "_ALREADY_EXISTS\020\n\022\026\n\022FUNCTION_NOT_FOUND\020" + + "\013\022\030\n\024NEWER_FUNCTION_FOUND\020\014\022\032\n\026FUNCTION_" + + "NOT_IN_REGION\020\r\022\031\n\025SCHEMA_ALREADY_EXISTS" + + "\020\016\022\026\n\022NEWER_SCHEMA_FOUND\020\017\022\024\n\020SCHEMA_NOT" + + "_FOUND\020\020\022\030\n\024SCHEMA_NOT_IN_REGION\020\021\022\032\n\026TA" + + "BLES_EXIST_ON_SCHEMA\020\022\022\035\n\031UNALLOWED_SCHE" + + "MA_MUTATION\020\023\022%\n!AUTO_PARTITION_SEQUENCE" + + "_NOT_FOUND\020\024\022#\n\037CANNOT_COERCE_AUTO_PARTI" + + "TION_ID\020\025\022\024\n\020TOO_MANY_INDEXES\020\0262\345\006\n\017Meta", + "DataService\022/\n\010getTable\022\020.GetTableReques" + + "t\032\021.MetaDataResponse\0227\n\014getFunctions\022\024.G" + + "etFunctionsRequest\032\021.MetaDataResponse\0221\n" + + "\tgetSchema\022\021.GetSchemaRequest\032\021.MetaData" + + "Response\0225\n\013createTable\022\023.CreateTableReq" + + "uest\032\021.MetaDataResponse\022;\n\016createFunctio" + + "n\022\026.CreateFunctionRequest\032\021.MetaDataResp" + + "onse\0227\n\014createSchema\022\024.CreateSchemaReque" + + "st\032\021.MetaDataResponse\0221\n\tdropTable\022\021.Dro" + + "pTableRequest\032\021.MetaDataResponse\0223\n\ndrop", + "Schema\022\022.DropSchemaRequest\032\021.MetaDataRes" + + "ponse\0227\n\014dropFunction\022\024.DropFunctionRequ" + + "est\032\021.MetaDataResponse\0221\n\taddColumn\022\021.Ad" + + "dColumnRequest\032\021.MetaDataResponse\0223\n\ndro" + + "pColumn\022\022.DropColumnRequest\032\021.MetaDataRe" + + "sponse\022?\n\020updateIndexState\022\030.UpdateIndex" + + "StateRequest\032\021.MetaDataResponse\0225\n\nclear" + + "Cache\022\022.ClearCacheRequest\032\023.ClearCacheRe" + + "sponse\0225\n\ngetVersion\022\022.GetVersionRequest" + + "\032\023.GetVersionResponse\022P\n\023clearTableFromC", + "ache\022\033.ClearTableFromCacheRequest\032\034.Clea" + + "rTableFromCacheResponseBB\n(org.apache.ph" + + "oenix.coprocessor.generatedB\016MetaDataPro" + + "tosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -17261,7 +17352,7 @@ public final class MetaDataProtos { internal_static_GetVersionResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetVersionResponse_descriptor, - new java.lang.String[] { "Version", }); + new java.lang.String[] { "Version", "SystemCatalogTimestamp", }); internal_static_ClearTableFromCacheRequest_descriptor = getDescriptor().getMessageTypes().get(18); internal_static_ClearTableFromCacheRequest_fieldAccessorTable = new http://git-wip-us.apache.org/repos/asf/phoenix/blob/8cda8141/phoenix-core/src/main/java/org/apache/phoenix/exception/UpgradeRequiredException.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/UpgradeRequiredException.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/UpgradeRequiredException.java index 9352a50..6c9706b 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/exception/UpgradeRequiredException.java +++ 
b/phoenix-core/src/main/java/org/apache/phoenix/exception/UpgradeRequiredException.java @@ -17,12 +17,23 @@ */ package org.apache.phoenix.exception; +import org.apache.hadoop.hbase.HConstants; public class UpgradeRequiredException extends RetriableUpgradeException { + private final long systemCatalogTimestamp; public UpgradeRequiredException() { + this(HConstants.OLDEST_TIMESTAMP); + } + + public UpgradeRequiredException(long systemCatalogTimeStamp) { super("Operation not allowed since cluster hasn't been upgraded. Call EXECUTE UPGRADE. ", - SQLExceptionCode.UPGRADE_REQUIRED.getSQLState(), SQLExceptionCode.UPGRADE_REQUIRED.getErrorCode()); + SQLExceptionCode.UPGRADE_REQUIRED.getSQLState(), SQLExceptionCode.UPGRADE_REQUIRED.getErrorCode()); + this.systemCatalogTimestamp = systemCatalogTimeStamp; + } + + public long getSystemCatalogTimeStamp() { + return systemCatalogTimestamp; } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/8cda8141/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java index b75119b..72bfa83 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java @@ -84,7 +84,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated public MetaDataMutationResult createTable(List<Mutation> tableMetaData, byte[] tableName, PTableType tableType, Map<String, Object> tableProps, List<Pair<byte[], Map<String, Object>>> families, byte[][] splits, - boolean isNamespaceMapped, boolean allocateIndexId) throws SQLException; + boolean isNamespaceMapped, boolean allocateIndexId, boolean isDoNotUpgradePropSet) throws SQLException; public MetaDataMutationResult dropTable(List<Mutation> tableMetadata, PTableType tableType, boolean cascade) throws SQLException; public MetaDataMutationResult dropFunction(List<Mutation> tableMetadata, boolean ifExists) throws SQLException; public MetaDataMutationResult addColumn(List<Mutation> tableMetaData, PTable table, Map<String, List<Pair<String,Object>>> properties, Set<String> colFamiliesForPColumnsToBeAdded, List<PColumn> columns) throws SQLException;