This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
     new bbcc362  PHOENIX-5718 GetTable builds a table excluding the given clientTimeStamp
bbcc362 is described below

commit bbcc362ee459492bfca7bf66aa1bdde85c25464f
Author: Sandeep Guggilam <sguggi...@sandeepg-ltm.internal.salesforce.com>
AuthorDate: Mon Mar 23 15:47:43 2020 -0700

    PHOENIX-5718 GetTable builds a table excluding the given clientTimeStamp
    
    Signed-off-by: Chinmay Kulkarni <chinmayskulka...@apache.org>
---
 .../phoenix/end2end/MetaDataEndpointImplIT.java    | 35 +++++++++++++++++++++-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  | 23 +++++++-------
 2 files changed, 46 insertions(+), 12 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
index dca4b6b..cd6c6f3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
@@ -17,11 +17,13 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.TableViewFinderResult;
 import org.apache.phoenix.util.ViewUtil;
 import org.junit.Test;
@@ -207,6 +209,37 @@ public class MetaDataEndpointImplIT extends ParallelStatsDisabledIT {
         assertColumnNamesEqual(PhoenixRuntime.getTableNoCache(conn, childView.toUpperCase()), "A", "B", "D");
 
     }
+    
+    @Test
+    public void testUpdateCacheWithAlteringColumns() throws Exception {
+        String tableName = generateUniqueName();
+        
+        try (PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(
+            PhoenixConnection.class)) {
+            String ddlFormat =
+                    "CREATE TABLE IF NOT EXISTS " + tableName + "  (" + " PK2 INTEGER NOT NULL, "
+                            + "V1 INTEGER, V2 INTEGER "
+                            + " CONSTRAINT NAME_PK PRIMARY KEY (PK2)" + " )";
+            conn.createStatement().execute(ddlFormat);
+            conn.createStatement().execute("ALTER TABLE " + tableName + " ADD V3 integer");
+            PTable table = PhoenixRuntime.getTable(conn, tableName.toUpperCase());
+            assertColumnNamesEqual(table, "PK2", "V1", "V2", "V3");
+            
+            // Set the SCN to the timestamp when V3 column is added
+            Properties props = PropertiesUtil.deepCopy(conn.getClientInfo());
+            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(table.getTimeStamp()));
+            
+            try(PhoenixConnection metaConnection = new PhoenixConnection(conn,
+                conn.getQueryServices(), props)) {
+                // Force update the cache and check if V3 is present in the returned table result
+                table = PhoenixRuntime.getTableNoCache(metaConnection, tableName.toUpperCase());
+                assertColumnNamesEqual(table, "PK2", "V1", "V2", "V3");
+                assertColumnNamesEqual(table, "PK2", "V1", "V2", "V3");
+            }
+        }
+        
+        
+    }
+    
 
     @Test
     public void testDroppingAColumn() throws Exception {
@@ -376,4 +409,4 @@ public class MetaDataEndpointImplIT extends ParallelStatsDisabledIT {
         return new HTable(utility.getConfiguration(), catalogTable);
     }
 
-}
\ No newline at end of file
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 05c0b4d..02fd037 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -48,6 +48,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.JAR_PATH_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_VALUE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VIEW_TTL_HWM;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NULLABLE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_ARGS_BYTES;
@@ -69,13 +70,12 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.UPDATE_CACHE_FREQU
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_BYTES;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TTL_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TTL_HWM_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TTL_NOT_DEFINED;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VIEW_TTL_HWM;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE_BYTES;
 import static org.apache.phoenix.query.QueryConstants.VIEW_MODIFIED_PROPERTY_TAG_TYPE;
 import static org.apache.phoenix.schema.PTableType.INDEX;
 import static org.apache.phoenix.util.SchemaUtil.getVarCharLength;
@@ -98,13 +98,6 @@ import java.util.NavigableMap;
 import java.util.Properties;
 import java.util.Set;
 
-import com.google.common.cache.Cache;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.RpcCallback;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.Service;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -242,6 +235,14 @@ import org.apache.phoenix.util.ViewUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.cache.Cache;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+
 /**
  * Endpoint co-processor through which all Phoenix metadata mutations flow.
  * Phoenix metadata is stored in SYSTEM.CATALOG. The table specific information
@@ -2896,7 +2897,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             // Query for the latest table first, since it's not cached
             table =
                     buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion);
-            if ((table != null && table.getTimeStamp() < clientTimeStamp) ||
+            if ((table != null && table.getTimeStamp() <= clientTimeStamp) ||
                     (blockWriteRebuildIndex && table.getIndexDisableTimestamp() > 0)) {
                 return table;
             }
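
For readers skimming the patch, the behavioral change is the single comparison above: with '<', a table whose timestamp was exactly equal to the client-supplied clientTimeStamp (for example a connection whose CURRENT_SCN is set to the timestamp of the latest ALTER TABLE) skipped this fast path and fell through to a rebuild that excluded that timestamp, so the newly added column was missing; with '<=' that table is returned. A minimal sketch of the fixed boundary rule, illustrative only (the helper name isVisibleAt is hypothetical and not part of MetaDataEndpointImpl):

    // Hedged sketch of the visibility rule after PHOENIX-5718: a table whose
    // last DDL timestamp equals the client's SCN/clientTimeStamp must be
    // visible. The new testUpdateCacheWithAlteringColumns exercises exactly
    // this edge by setting CURRENT_SCN to table.getTimeStamp() after
    // ALTER TABLE ... ADD V3.
    static boolean isVisibleAt(long tableTimeStamp, long clientTimeStamp) {
        return tableTimeStamp <= clientTimeStamp; // was '<' before this change
    }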
