PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client and server when upgraded from 4.6


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a6de19bd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a6de19bd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a6de19bd

Branch: refs/heads/calcite
Commit: a6de19bd2290d8242b002ff661ec7028577e3055
Parents: 1f7b47a
Author: James Taylor <jtay...@salesforce.com>
Authored: Wed Feb 24 17:25:51 2016 -0800
Committer: James Taylor <jtay...@salesforce.com>
Committed: Wed Feb 24 17:28:26 2016 -0800

----------------------------------------------------------------------
 .../query/ConnectionQueryServicesImpl.java      | 65 ++++++++++++++++----
 .../stats/StatisticsCollectorFactory.java       | 17 ++++-
 2 files changed, 68 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a6de19bd/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index d27a4bc..37ebc78 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2367,24 +2367,24 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                             MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0, columnsToAdd);
                                 }
                                 if(currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0) {
+                                    // Drop old stats table so that new stats table is created
+                                    metaConnection = dropStatsTable(metaConnection,
+                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4);
                                     // Add these columns one at a time, each with different timestamps so that if folks have
                                     // run the upgrade code already for a snapshot, we'll still enter this block (and do the
                                     // parts we haven't yet done).
-                                    metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4,
+                                    metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3,
                                             PhoenixDatabaseMetaData.TRANSACTIONAL + " " + PBoolean.INSTANCE.getSqlTypeName());
-                                    // Drop old stats table so that new stats table is created
-                                    metaConnection = dropStatsTable(metaConnection,
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3);
-                                    metaConnection = addColumnsIfNotExists(metaConnection,
-                                            PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+                                    metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
                                             MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
-                                            PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " "
-                                                    + PLong.INSTANCE.getSqlTypeName());
-                                    metaConnection = setImmutableTableIndexesImmutable(metaConnection, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1);
-                                    Properties props = PropertiesUtil.deepCopy(metaConnection.getClientInfo());
-                                    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0));
-                                    metaConnection = new PhoenixConnection(metaConnection, ConnectionQueryServicesImpl.this, props);
-                                    // that already have cached data.
+                                            PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " " + PLong.INSTANCE.getSqlTypeName());
+                                    metaConnection = setImmutableTableIndexesImmutable(metaConnection,
+                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1);
+                                    metaConnection = updateSystemCatalogTimestamp(metaConnection,
+                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
+                                    ConnectionQueryServicesImpl.this.removeTable(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
+                                    logger.warn("Update of SYSTEM.CATALOG complete");
                                                                        clearCache();
                                 }
                                 
@@ -2535,6 +2535,45 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         return metaConnection;
     }
 
+    /**
+     * Forces update of SYSTEM.CATALOG by setting column to existing value
+     * @param oldMetaConnection
+     * @param timestamp
+     * @return
+     * @throws SQLException
+     */
+    private PhoenixConnection updateSystemCatalogTimestamp(PhoenixConnection oldMetaConnection, long timestamp) throws SQLException {
+        SQLException sqlE = null;
+        Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo());
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp));
+        PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props);
+        boolean autoCommit = metaConnection.getAutoCommit();
+        try {
+            metaConnection.setAutoCommit(true);
+            metaConnection.createStatement().execute(
+                    "UPSERT INTO SYSTEM.CATALOG(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, DISABLE_WAL)\n" +
+                    "VALUES (NULL, '" + QueryConstants.SYSTEM_SCHEMA_NAME + "','" + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE + "', NULL, NULL, FALSE)");
+        } catch (SQLException e) {
+            logger.warn("exception during upgrading stats table:" + e);
+            sqlE = e;
+        } finally {
+            try {
+                metaConnection.setAutoCommit(autoCommit);
+                oldMetaConnection.close();
+            } catch (SQLException e) {
+                if (sqlE != null) {
+                    sqlE.setNextException(e);
+                } else {
+                    sqlE = e;
+                }
+            }
+            if (sqlE != null) {
+                throw sqlE;
+            }
+        }
+        return metaConnection;
+    }
+
     private PhoenixConnection dropStatsTable(PhoenixConnection oldMetaConnection, long timestamp)
                        throws SQLException, IOException {
                Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo());

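The updateSystemCatalogTimestamp() method added above bumps SYSTEM.CATALOG to the
4.7.0 timestamp with a "no-op upsert": it re-writes the existing DISABLE_WAL value
of the SYSTEM.CATALOG header row on a connection whose CurrentSCN is pinned, so the
row's cell timestamp advances without any metadata actually changing. A rough
standalone sketch of the same trick, not part of the commit (the class name and the
jdbc:phoenix:localhost:2181 URL are placeholders):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    import org.apache.phoenix.coprocessor.MetaDataProtocol;
    import org.apache.phoenix.util.PhoenixRuntime;

    public class TouchSystemCatalog {
        public static void main(String[] args) throws Exception {
            // Pin the connection's SCN so the upsert is written at exactly the
            // 4.7.0 system table timestamp.
            Properties props = new Properties();
            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
                    Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0));
            try (Connection conn =
                    DriverManager.getConnection("jdbc:phoenix:localhost:2181", props)) {
                conn.setAutoCommit(true);
                // Re-upsert the existing DISABLE_WAL value of the SYSTEM.CATALOG
                // header row; the metadata stays the same, only its timestamp moves.
                conn.createStatement().execute(
                        "UPSERT INTO SYSTEM.CATALOG(TENANT_ID, TABLE_SCHEM, TABLE_NAME, " +
                        "COLUMN_NAME, COLUMN_FAMILY, DISABLE_WAL) " +
                        "VALUES (NULL, 'SYSTEM', 'CATALOG', NULL, NULL, FALSE)");
            }
        }
    }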
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a6de19bd/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java
index aaffd73..964bb28 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java
@@ -18,10 +18,15 @@
 package org.apache.phoenix.schema.stats;
 
 import java.io.IOException;
+import java.util.Set;
 
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryServices;
 
+import com.google.common.collect.Sets;
+
 /**
  * Provides new {@link StatisticsCollector} instances based on configuration settings for a
  * table (or system-wide configuration of statistics).
@@ -50,6 +55,15 @@ public class StatisticsCollectorFactory {
         }
     }
 
+    // TODO: make this declarative through new DISABLE_STATS column on SYSTEM.CATALOG table.
+    // Also useful would be a USE_CURRENT_TIME_FOR_STATS column on SYSTEM.CATALOG table.
+    private static final Set<TableName> DISABLE_STATS = Sets.newHashSetWithExpectedSize(3);
+    static {
+        DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
+        DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME));
+        DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME));
+    }
+    
     /**
      * Determines if statistics are enabled (which is the default). This is done on the
      * RegionCoprocessorEnvironment for now to allow setting this on a per-table basis, although
@@ -57,7 +71,8 @@ public class StatisticsCollectorFactory {
      * use case for that.
      */
     private static boolean statisticsEnabled(RegionCoprocessorEnvironment env) {
-        return env.getConfiguration().getBoolean(QueryServices.STATS_ENABLED_ATTRIB, true);
+        return env.getConfiguration().getBoolean(QueryServices.STATS_ENABLED_ATTRIB, true) &&
+                !DISABLE_STATS.contains(env.getRegionInfo().getTable());
     }
 
 }
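Taken together, the patched statisticsEnabled() check requires both that the global
stats switch (QueryServices.STATS_ENABLED_ATTRIB) is on and that the region's table
is not one of the three excluded system tables. A small illustrative sketch of that
predicate outside the coprocessor environment, not part of the commit (the class,
the main() driver, and the literal table names stand in for the
PhoenixDatabaseMetaData constants and the RegionCoprocessorEnvironment):

    import java.util.Set;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.phoenix.query.QueryServices;

    import com.google.common.collect.Sets;

    public class DisableStatsCheck {
        // The same three system tables the patch excludes from stats collection.
        private static final Set<TableName> DISABLE_STATS = Sets.newHashSet(
                TableName.valueOf("SYSTEM.CATALOG"),
                TableName.valueOf("SYSTEM.FUNCTION"),
                TableName.valueOf("SYSTEM.SEQUENCE"));

        // Mirrors the shape of the patched statisticsEnabled(): the global switch
        // must be on AND the table must not be in the exclusion set.
        static boolean statisticsEnabled(Configuration conf, TableName table) {
            return conf.getBoolean(QueryServices.STATS_ENABLED_ATTRIB, true)
                    && !DISABLE_STATS.contains(table);
        }

        public static void main(String[] args) {
            Configuration conf = new Configuration(false);
            System.out.println(statisticsEnabled(conf, TableName.valueOf("MY_TABLE")));       // true
            System.out.println(statisticsEnabled(conf, TableName.valueOf("SYSTEM.CATALOG"))); // false
        }
    }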
