PHOENIX-2478 Rows committed in transaction overlapping index creation are not 
populated


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f572fa63
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f572fa63
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f572fa63

Branch: refs/heads/calcite
Commit: f572fa630a5bdfe5f1eab074838a82344d38063d
Parents: 1717f12
Author: James Taylor <jtay...@salesforce.com>
Authored: Tue Jan 19 20:23:51 2016 -0800
Committer: James Taylor <jtay...@salesforce.com>
Committed: Tue Jan 19 20:44:47 2016 -0800

----------------------------------------------------------------------
 .../phoenix/end2end/index/ImmutableIndexIT.java |   6 +-
 .../end2end/index/IndexExpressionIT.java        |  16 +--
 .../phoenix/compile/PostIndexDDLCompiler.java   |   2 +-
 .../phoenix/exception/SQLExceptionCode.java     | 108 +++++++++----------
 .../apache/phoenix/execute/MutationState.java   |  39 ++++---
 .../apache/phoenix/jdbc/PhoenixStatement.java   |   2 +-
 .../apache/phoenix/schema/MetaDataClient.java   |   2 +-
 .../org/apache/phoenix/schema/TableRef.java     |   8 +-
 .../phoenix/compile/QueryCompilerTest.java      |   2 +-
 9 files changed, 98 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f572fa63/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
index c4ecfbb..7171382 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
@@ -219,7 +219,7 @@ public class ImmutableIndexIT extends 
BaseHBaseManagedTimeIT {
                 futureList.add(threadPool.submit(new 
UpsertRunnable(fullTableName)));
             }
             // upsert some rows before creating the index 
-            Thread.sleep(5000);
+            Thread.sleep(500);
 
             // create the index 
             try (Connection conn2 = DriverManager.getConnection(getUrl(), 
props)) {
@@ -230,14 +230,14 @@ public class ImmutableIndexIT extends 
BaseHBaseManagedTimeIT {
             }
 
             // upsert some rows after creating the index
-            Thread.sleep(1000);
+            Thread.sleep(100);
             // cancel the running threads
             for (Future<?> future : futureList) {
                 future.cancel(true);
             }
             threadPool.shutdownNow();
             threadPool.awaitTermination(30, TimeUnit.SECONDS);
-            Thread.sleep(1000);
+            Thread.sleep(100);
 
             ResultSet rs;
             rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ 
COUNT(*) FROM " + fullTableName);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f572fa63/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
index 7da0d85..7be8d41 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
@@ -95,7 +95,7 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT 
{
 
     private void verifyResult(ResultSet rs, int i) throws SQLException {
         assertTrue(rs.next());
-        assertEquals("VARCHAR" + String.valueOf(i) + "_" + 
StringUtils.rightPad("CHAR" + String.valueOf(i), 6, ' ')
+        assertEquals("VARCHAR" + String.valueOf(i) + "_" + 
StringUtils.rightPad("CHAR" + String.valueOf(i), 10, ' ')
                 + "_A.VARCHAR" + String.valueOf(i) + "_" + 
StringUtils.rightPad("B.CHAR" + String.valueOf(i), 10, ' '),
                 rs.getString(1));
         assertEquals(i * 3, rs.getInt(2));
@@ -141,7 +141,7 @@ public class IndexExpressionIT extends 
BaseHBaseManagedTimeIT {
                     // DECIMAL in the index (which is not fixed width)
                     + " AND date_pk+1=? AND date1+1=? AND date2+1=?";
             stmt = conn.prepareStatement(whereSql);
-            stmt.setString(1, "VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   ");
+            stmt.setString(1, "VARCHAR1_CHAR1     _A.VARCHAR1_B.CHAR1   ");
             stmt.setInt(2, 3);
             Date date = DateUtil.parseDate("2015-01-02 00:00:00");
             stmt.setDate(3, date);
@@ -153,8 +153,8 @@ public class IndexExpressionIT extends 
BaseHBaseManagedTimeIT {
             assertEquals(
                     localIndex ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER 
_LOCAL_IDX_INDEX_TEST."
                             + dataTableName
-                            + " [-32768,'VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   
',3,'2015-01-02 00:00:00.000',1,420,156,800,000,1,420,156,800,000]\nCLIENT 
MERGE SORT"
-                            : "CLIENT PARALLEL 1-WAY RANGE SCAN OVER 
INDEX_TEST.IDX ['VARCHAR1_CHAR1 _A.VARCHAR1_B.CHAR1   ',3,'2015-01-02 
00:00:00.000',1,420,156,800,000,1,420,156,800,000]",
+                            + " [-32768,'VARCHAR1_CHAR1     
_A.VARCHAR1_B.CHAR1   ',3,'2015-01-02 
00:00:00.000',1,420,156,800,000,1,420,156,800,000]\nCLIENT MERGE SORT"
+                            : "CLIENT PARALLEL 1-WAY RANGE SCAN OVER 
INDEX_TEST.IDX ['VARCHAR1_CHAR1     _A.VARCHAR1_B.CHAR1   ',3,'2015-01-02 
00:00:00.000',1,420,156,800,000,1,420,156,800,000]",
                     QueryUtil.getExplainPlan(rs));
 
             // verify that the correct results are returned
@@ -254,20 +254,20 @@ public class IndexExpressionIT extends 
BaseHBaseManagedTimeIT {
                     + fullDataTableName;
             ResultSet rs = conn.createStatement().executeQuery("SELECT /*+ 
NO_INDEX */ " + selectSql);
             assertTrue(rs.next());
-            assertEquals("VARCHAR1_CHAR1 _A.VARCHAR_UPDATED_B.CHAR1   ", 
rs.getString(1));
+            assertEquals("VARCHAR1_CHAR1     _A.VARCHAR_UPDATED_B.CHAR1   ", 
rs.getString(1));
             assertEquals(101, rs.getLong(2));
             assertTrue(rs.next());
-            assertEquals("VARCHAR2_CHAR2 _A.VARCHAR2_B.CHAR2   ", 
rs.getString(1));
+            assertEquals("VARCHAR2_CHAR2     _A.VARCHAR2_B.CHAR2   ", 
rs.getString(1));
             assertEquals(2, rs.getLong(2));
             assertFalse(rs.next());
 
             // verify that the rows in the index table are also updated
             rs = conn.createStatement().executeQuery("SELECT " + selectSql);
             assertTrue(rs.next());
-            assertEquals("VARCHAR1_CHAR1 _A.VARCHAR_UPDATED_B.CHAR1   ", 
rs.getString(1));
+            assertEquals("VARCHAR1_CHAR1     _A.VARCHAR_UPDATED_B.CHAR1   ", 
rs.getString(1));
             assertEquals(101, rs.getLong(2));
             assertTrue(rs.next());
-            assertEquals("VARCHAR2_CHAR2 _A.VARCHAR2_B.CHAR2   ", 
rs.getString(1));
+            assertEquals("VARCHAR2_CHAR2     _A.VARCHAR2_B.CHAR2   ", 
rs.getString(1));
             assertEquals(2, rs.getLong(2));
             assertFalse(rs.next());
             conn.createStatement().execute("DROP INDEX IDX ON " + 
fullDataTableName);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f572fa63/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
index bb0b595..1a667ae 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
@@ -120,7 +120,7 @@ public class PostIndexDDLCompiler {
             DelegateMutationPlan delegate = new 
DelegateMutationPlan(statement.compileMutation(updateStmtStr.toString())) {
                 @Override
                 public MutationState execute() throws SQLException {
-                    connection.getMutationState().commitWriteFence(dataTable);
+                    connection.getMutationState().commitDDLFence(dataTable);
                     return super.execute();
                 }
             };

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f572fa63/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java 
b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 9767cbe..b1d8e7d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -131,53 +131,53 @@ public enum SQLExceptionCode {
     PRIMARY_KEY_MISSING(509, "42888", "The table does not have a primary 
key."),
     PRIMARY_KEY_ALREADY_EXISTS(510, "42889", "The table already has a primary 
key."),
     ORDER_BY_NOT_IN_SELECT_DISTINCT(511, "42890", "All ORDER BY expressions 
must appear in SELECT DISTINCT:"),
-    INVALID_PRIMARY_KEY_CONSTRAINT(512, "42891", "Invalid column reference in 
primary key constraint"),
-    ARRAY_NOT_ALLOWED_IN_PRIMARY_KEY(513, "42892", "Array type not allowed as 
primary key constraint"),
+    INVALID_PRIMARY_KEY_CONSTRAINT(512, "42891", "Invalid column reference in 
primary key constraint."),
+    ARRAY_NOT_ALLOWED_IN_PRIMARY_KEY(513, "42892", "Array type not allowed as 
primary key constraint."),
     COLUMN_EXIST_IN_DEF(514, "42892", "A duplicate column name was detected in 
the object definition or ALTER TABLE statement.", new Factory() {
         @Override
         public SQLException newException(SQLExceptionInfo info) {
             return new ColumnAlreadyExistsException(info.getSchemaName(), 
info.getTableName(), info.getColumnName());
         }
     }),
-    ORDER_BY_ARRAY_NOT_SUPPORTED(515, "42893", "ORDER BY of an array type is 
not allowed"),
-    NON_EQUALITY_ARRAY_COMPARISON(516, "42894", "Array types may only be 
compared using = or !="),
-    INVALID_NOT_NULL_CONSTRAINT(517, "42895", "Invalid not null constraint on 
non primary key column"),
+    ORDER_BY_ARRAY_NOT_SUPPORTED(515, "42893", "ORDER BY of an array type is 
not allowed."),
+    NON_EQUALITY_ARRAY_COMPARISON(516, "42894", "Array types may only be 
compared using = or !=."),
+    INVALID_NOT_NULL_CONSTRAINT(517, "42895", "Invalid not null constraint on 
non primary key column."),
 
     /**
      *  Invalid Transaction State (errorcode 05, sqlstate 25)
      */
      READ_ONLY_CONNECTION(518,"25502","Mutations are not permitted for a 
read-only connection."),
 
-     VARBINARY_ARRAY_NOT_SUPPORTED(519, "42896", "VARBINARY ARRAY is not 
supported"),
+     VARBINARY_ARRAY_NOT_SUPPORTED(519, "42896", "VARBINARY ARRAY is not 
supported."),
 
      /**
       *  Expression Index exceptions.
       */
-     AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX(520, "42897", "Aggregate 
expression not allowed in an index"),
-     NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX(521, "42898", 
"Non-deterministic expression not allowed in an index"),
-     STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX(522, "42899", "Stateless 
expression not allowed in an index"),
+     AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX(520, "42897", "Aggregate 
expression not allowed in an index."),
+     NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX(521, "42898", 
"Non-deterministic expression not allowed in an index."),
+     STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX(522, "42899", "Stateless 
expression not allowed in an index."),
      
      /**
       *  Transaction exceptions.
       */
-     TRANSACTION_CONFLICT_EXCEPTION(523, "42900", "Transaction aborted due to 
conflict with other mutations"),
-     TRANSACTION_EXCEPTION(524, "42901", "Transaction aborted due to error"),
+     TRANSACTION_CONFLICT_EXCEPTION(523, "42900", "Transaction aborted due to 
conflict with other mutations."),
+     TRANSACTION_EXCEPTION(524, "42901", "Transaction aborted due to error."),
 
      /**
       * Union All related errors
       */
-     SELECT_COLUMN_NUM_IN_UNIONALL_DIFFS(525, "42902", "SELECT column number 
differs in a Union All query is not allowed"),
-     SELECT_COLUMN_TYPE_IN_UNIONALL_DIFFS(526, "42903", "SELECT column types 
differ in a Union All query is not allowed"),
+     SELECT_COLUMN_NUM_IN_UNIONALL_DIFFS(525, "42902", "SELECT column number 
differs in a Union All query is not allowed."),
+     SELECT_COLUMN_TYPE_IN_UNIONALL_DIFFS(526, "42903", "SELECT column types 
differ in a Union All query is not allowed."),
 
      /**
       * Row timestamp column related errors
       */
-     ROWTIMESTAMP_ONE_PK_COL_ONLY(527, "42904", "Only one column that is part 
of the primary key can be declared as a ROW_TIMESTAMP"),
-     ROWTIMESTAMP_PK_COL_ONLY(528, "42905", "Only columns part of the primary 
key can be declared as a ROW_TIMESTAMP"),
-     ROWTIMESTAMP_CREATE_ONLY(529, "42906", "A column can be added as 
ROW_TIMESTAMP only in CREATE TABLE"),
-     ROWTIMESTAMP_COL_INVALID_TYPE(530, "42907", "A column can be added as 
ROW_TIMESTAMP only if it is of type DATE, BIGINT, TIME OR TIMESTAMP"),
-     ROWTIMESTAMP_NOT_ALLOWED_ON_VIEW(531, "42908", "Declaring a column as 
row_timestamp is not allowed for views"),
-     INVALID_SCN(532, "42909", "Value of SCN cannot be less than zero"),
+     ROWTIMESTAMP_ONE_PK_COL_ONLY(527, "42904", "Only one column that is part 
of the primary key can be declared as a ROW_TIMESTAMP."),
+     ROWTIMESTAMP_PK_COL_ONLY(528, "42905", "Only columns part of the primary 
key can be declared as a ROW_TIMESTAMP."),
+     ROWTIMESTAMP_CREATE_ONLY(529, "42906", "A column can be added as 
ROW_TIMESTAMP only in CREATE TABLE."),
+     ROWTIMESTAMP_COL_INVALID_TYPE(530, "42907", "A column can be added as 
ROW_TIMESTAMP only if it is of type DATE, BIGINT, TIME OR TIMESTAMP."),
+     ROWTIMESTAMP_NOT_ALLOWED_ON_VIEW(531, "42908", "Declaring a column as 
row_timestamp is not allowed for views."),
+     INVALID_SCN(532, "42909", "Value of SCN cannot be less than zero."),
      /**
      * HBase and Phoenix specific implementation defined sub-classes.
      * Column family related exceptions.
@@ -241,10 +241,10 @@ public enum SQLExceptionCode {
     INVALID_INDEX_STATE_TRANSITION(1028, "42Y87", "Invalid index state 
transition."),
     INVALID_MUTABLE_INDEX_CONFIG(1029, "42Y88", "Mutable secondary indexes 
must have the "
             + IndexManagementUtil.WAL_EDIT_CODEC_CLASS_KEY + " property set to 
"
-            +  IndexManagementUtil.INDEX_WAL_EDIT_CODEC_CLASS_NAME + " in the 
hbase-sites.xml of every region server"),
+            +  IndexManagementUtil.INDEX_WAL_EDIT_CODEC_CLASS_NAME + " in the 
hbase-sites.xml of every region server."),
 
 
-    CANNOT_CREATE_TENANT_SPECIFIC_TABLE(1030, "42Y89", "Cannot create table 
for tenant-specific connection"),
+    CANNOT_CREATE_TENANT_SPECIFIC_TABLE(1030, "42Y89", "Cannot create table 
for tenant-specific connection."),
     DEFAULT_COLUMN_FAMILY_ONLY_ON_CREATE_TABLE(1034, "42Y93", "Default column 
family may only be specified when creating a table."),
     INSUFFICIENT_MULTI_TENANT_COLUMNS(1040, "42Y96", "A MULTI_TENANT table 
must have two or more PK columns with the first column being NOT NULL."),
     TENANTID_IS_OF_WRONG_TYPE(1041, "42Y97", "The TenantId could not be 
converted to correct format for this table."),
@@ -254,33 +254,33 @@ public enum SQLExceptionCode {
     NO_LOCAL_INDEX_ON_TABLE_WITH_IMMUTABLE_ROWS(1048,"43A05","Local indexes 
aren't allowed on tables with immutable rows."),
     COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY(1049, "43A06", "Column family not 
allowed for table properties."),
     COLUMN_FAMILY_NOT_ALLOWED_FOR_TTL(1050, "43A07", "Setting TTL for a column 
family not supported. You can only have TTL for the entire table."),
-    CANNOT_ALTER_PROPERTY(1051, "43A08", "Property can be specified or changed 
only when creating a table"),
-    CANNOT_SET_PROPERTY_FOR_COLUMN_NOT_ADDED(1052, "43A09", "Property cannot 
be specified for a column family that is not being added or modified"),
-    CANNOT_SET_TABLE_PROPERTY_ADD_COLUMN(1053, "43A10", "Table level property 
cannot be set when adding a column"),
+    CANNOT_ALTER_PROPERTY(1051, "43A08", "Property can be specified or changed 
only when creating a table."),
+    CANNOT_SET_PROPERTY_FOR_COLUMN_NOT_ADDED(1052, "43A09", "Property cannot 
be specified for a column family that is not being added or modified."),
+    CANNOT_SET_TABLE_PROPERTY_ADD_COLUMN(1053, "43A10", "Table level property 
cannot be set when adding a column."),
 
     NO_LOCAL_INDEXES(1054, "43A11", "Local secondary indexes are not supported 
for HBase versions " +
         
MetaDataUtil.decodeHBaseVersionAsString(PhoenixDatabaseMetaData.MIN_LOCAL_SI_VERSION_DISALLOW)
 + " through " + 
MetaDataUtil.decodeHBaseVersionAsString(PhoenixDatabaseMetaData.MAX_LOCAL_SI_VERSION_DISALLOW)
 + " inclusive."),
     UNALLOWED_LOCAL_INDEXES(1055, "43A12", "Local secondary indexes are 
configured to not be allowed."),
     
-    DESC_VARBINARY_NOT_SUPPORTED(1056, "43A13", "Descending VARBINARY columns 
not supported"),
+    DESC_VARBINARY_NOT_SUPPORTED(1056, "43A13", "Descending VARBINARY columns 
not supported."),
     
-    DEFAULT_COLUMN_FAMILY_ON_SHARED_TABLE(1069, "43A13", "Default column 
family not allowed on VIEW or shared INDEX"),
-    ONLY_TABLE_MAY_BE_DECLARED_TRANSACTIONAL(1070, "44A01", "Only tables may 
be declared as transactional"),
-    TX_MAY_NOT_SWITCH_TO_NON_TX(1071, "44A02", "A transactional table may not 
be switched to non transactional"),
-       STORE_NULLS_MUST_BE_TRUE_FOR_TRANSACTIONAL(1072, "44A03", "Store nulls 
must be true when a table is transactional"),
-    CANNOT_START_TRANSACTION_WITH_SCN_SET(1073, "44A04", "Cannot start a 
transaction on a connection with SCN set"),
-    TX_MAX_VERSIONS_MUST_BE_GREATER_THAN_ONE(1074, "44A05", "A transactional 
table must define VERSION of greater than one"),
-    CANNOT_SPECIFY_SCN_FOR_TXN_TABLE(1075, "44A06", "Cannot use a connection 
with SCN set for a transactional table"),
-    NULL_TRANSACTION_CONTEXT(1076, "44A07", "No Transaction Context 
available"),
+    DEFAULT_COLUMN_FAMILY_ON_SHARED_TABLE(1069, "43A13", "Default column 
family not allowed on VIEW or shared INDEX."),
+    ONLY_TABLE_MAY_BE_DECLARED_TRANSACTIONAL(1070, "44A01", "Only tables may 
be declared as transactional."),
+    TX_MAY_NOT_SWITCH_TO_NON_TX(1071, "44A02", "A transactional table may not 
be switched to non transactional."),
+       STORE_NULLS_MUST_BE_TRUE_FOR_TRANSACTIONAL(1072, "44A03", "Store nulls 
must be true when a table is transactional."),
+    CANNOT_START_TRANSACTION_WITH_SCN_SET(1073, "44A04", "Cannot start a 
transaction on a connection with SCN set."),
+    TX_MAX_VERSIONS_MUST_BE_GREATER_THAN_ONE(1074, "44A05", "A transactional 
table must define VERSION of greater than one."),
+    CANNOT_SPECIFY_SCN_FOR_TXN_TABLE(1075, "44A06", "Cannot use a connection 
with SCN set for a transactional table."),
+    NULL_TRANSACTION_CONTEXT(1076, "44A07", "No Transaction Context 
available."),
     TRANSACTION_FAILED(1077, "44A08", "Transaction Failure "),
-    CANNOT_CREATE_TXN_TABLE_IF_TXNS_DISABLED(1078, "44A09", "Cannot create a 
transactional table if transactions are disabled"),
-    CANNOT_ALTER_TO_BE_TXN_IF_TXNS_DISABLED(1079, "44A10", "Cannot alter table 
to be transactional table if transactions are disabled"),
-    CANNOT_CREATE_TXN_TABLE_WITH_ROW_TIMESTAMP(1080, "44A11", "Cannot create a 
transactional table if transactions are disabled"),
-    CANNOT_ALTER_TO_BE_TXN_WITH_ROW_TIMESTAMP(1081, "44A12", "Cannot alter 
table to be transactional table if transactions are disabled"),
-    TX_MUST_BE_ENABLED_TO_SET_TX_CONTEXT(1082, "44A13", "Cannot set 
transaction context if transactions are disabled"),
-    TX_MUST_BE_ENABLED_TO_SET_AUTO_FLUSH(1083, "44A14", "Cannot set auto flush 
if transactions are disabled"),
-    TX_MUST_BE_ENABLED_TO_SET_ISOLATION_LEVEL(1084, "44A15", "Cannot set 
isolation level to TRANSACTION_READ_COMMITTED or TRANSACTION_SERIALIZABLE if 
transactions are disabled"),
-    TX_UNABLE_TO_GET_WRITE_FENCE(1085, "44A16", "Unable to obtain write fence 
for DDL operation"),
+    CANNOT_CREATE_TXN_TABLE_IF_TXNS_DISABLED(1078, "44A09", "Cannot create a 
transactional table if transactions are disabled."),
+    CANNOT_ALTER_TO_BE_TXN_IF_TXNS_DISABLED(1079, "44A10", "Cannot alter table 
to be transactional table if transactions are disabled."),
+    CANNOT_CREATE_TXN_TABLE_WITH_ROW_TIMESTAMP(1080, "44A11", "Cannot create a 
transactional table if transactions are disabled."),
+    CANNOT_ALTER_TO_BE_TXN_WITH_ROW_TIMESTAMP(1081, "44A12", "Cannot alter 
table to be transactional table if transactions are disabled."),
+    TX_MUST_BE_ENABLED_TO_SET_TX_CONTEXT(1082, "44A13", "Cannot set 
transaction context if transactions are disabled."),
+    TX_MUST_BE_ENABLED_TO_SET_AUTO_FLUSH(1083, "44A14", "Cannot set auto flush 
if transactions are disabled."),
+    TX_MUST_BE_ENABLED_TO_SET_ISOLATION_LEVEL(1084, "44A15", "Cannot set 
isolation level to TRANSACTION_READ_COMMITTED or TRANSACTION_SERIALIZABLE if 
transactions are disabled."),
+    TX_UNABLE_TO_GET_WRITE_FENCE(1085, "44A16", "Unable to obtain write fence 
for DDL operation."),
 
     /** Sequence related */
     SEQUENCE_ALREADY_EXIST(1200, "42Z00", "Sequence already exists.", new 
Factory() {
@@ -300,17 +300,17 @@ public enum SQLExceptionCode {
     CACHE_MUST_BE_NON_NEGATIVE_CONSTANT(1204, "42Z04", "Sequence CACHE value 
must be a non negative integer constant."),
     INVALID_USE_OF_NEXT_VALUE_FOR(1205, "42Z05", "NEXT VALUE FOR may only be 
used as in a SELECT or an UPSERT VALUES expression."),
     CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE(1206, "42Z06", "NEXT VALUE FOR must 
be called before CURRENT VALUE FOR is called."),
-    EMPTY_SEQUENCE_CACHE(1207, "42Z07", "No more cached sequence values"),
+    EMPTY_SEQUENCE_CACHE(1207, "42Z07", "No more cached sequence values."),
     MINVALUE_MUST_BE_CONSTANT(1208, "42Z08", "Sequence MINVALUE must be an 
integer or long constant."),
     MAXVALUE_MUST_BE_CONSTANT(1209, "42Z09", "Sequence MAXVALUE must be an 
integer or long constant."),
     MINVALUE_MUST_BE_LESS_THAN_OR_EQUAL_TO_MAXVALUE(1210, "42Z10", "Sequence 
MINVALUE must be less than or equal to MAXVALUE."),
     STARTS_WITH_MUST_BE_BETWEEN_MIN_MAX_VALUE(1211, "42Z11",
-            "STARTS WITH value must be greater than or equal to MINVALUE and 
less than or equal to MAXVALUE"),
-    SEQUENCE_VAL_REACHED_MAX_VALUE(1212, "42Z12", "Reached MAXVALUE of 
sequence"),
-    SEQUENCE_VAL_REACHED_MIN_VALUE(1213, "42Z13", "Reached MINVALUE of 
sequence"),
-    INCREMENT_BY_MUST_NOT_BE_ZERO(1214, "42Z14", "Sequence INCREMENT BY value 
cannot be zero"),
+            "STARTS WITH value must be greater than or equal to MINVALUE and 
less than or equal to MAXVALUE."),
+    SEQUENCE_VAL_REACHED_MAX_VALUE(1212, "42Z12", "Reached MAXVALUE of 
sequence."),
+    SEQUENCE_VAL_REACHED_MIN_VALUE(1213, "42Z13", "Reached MINVALUE of 
sequence."),
+    INCREMENT_BY_MUST_NOT_BE_ZERO(1214, "42Z14", "Sequence INCREMENT BY value 
cannot be zero."),
     NUM_SEQ_TO_ALLOCATE_MUST_BE_CONSTANT(1215, "42Z15", "Sequence NEXT n 
VALUES FOR must be a positive integer or constant." ),
-    NUM_SEQ_TO_ALLOCATE_NOT_SUPPORTED(1216, "42Z16", "Sequence NEXT n VALUES 
FOR is not supported for Sequences with the CYCLE flag" ),
+    NUM_SEQ_TO_ALLOCATE_NOT_SUPPORTED(1216, "42Z16", "Sequence NEXT n VALUES 
FOR is not supported for Sequences with the CYCLE flag." ),
 
     /** Parser error. (errorcode 06, sqlState 42P) */
     PARSER_ERROR(601, "42P00", "Syntax error.", Factory.SYNTAX_ERROR),
@@ -323,7 +323,7 @@ public enum SQLExceptionCode {
      * Implementation defined class. Execution exceptions (errorcode 11, 
sqlstate XCL).
      */
     RESULTSET_CLOSED(1101, "XCL01", "ResultSet is closed."),
-    GET_TABLE_REGIONS_FAIL(1102, "XCL02", "Cannot get all table regions"),
+    GET_TABLE_REGIONS_FAIL(1102, "XCL02", "Cannot get all table regions."),
     EXECUTE_QUERY_NOT_APPLICABLE(1103, "XCL03", "executeQuery may not be 
used."),
     EXECUTE_UPDATE_NOT_APPLICABLE(1104, "XCL04", "executeUpdate may not be 
used."),
     SPLIT_POINT_NOT_CONSTANT(1105, "XCL05", "Split points must be constants."),
@@ -335,22 +335,22 @@ public enum SQLExceptionCode {
             return new StaleRegionBoundaryCacheException(info.getSchemaName(), 
info.getTableName());
         }
     }),
-    CANNOT_SPLIT_LOCAL_INDEX(1109,"XCL09", "Local index may not be pre-split"),
-    CANNOT_SALT_LOCAL_INDEX(1110,"XCL10", "Local index may not be salted"),
+    CANNOT_SPLIT_LOCAL_INDEX(1109,"XCL09", "Local index may not be 
pre-split."),
+    CANNOT_SALT_LOCAL_INDEX(1110,"XCL10", "Local index may not be salted."),
 
     /**
      * Implementation defined class. Phoenix internal error. (errorcode 20, 
sqlstate INT).
      */
     CANNOT_CALL_METHOD_ON_TYPE(2001, "INT01", "Cannot call method on the 
argument type."),
-    CLASS_NOT_UNWRAPPABLE(2002, "INT03", "Class not unwrappable"),
+    CLASS_NOT_UNWRAPPABLE(2002, "INT03", "Class not unwrappable."),
     PARAM_INDEX_OUT_OF_BOUND(2003, "INT04", "Parameter position is out of 
range."),
-    PARAM_VALUE_UNBOUND(2004, "INT05", "Parameter value unbound"),
+    PARAM_VALUE_UNBOUND(2004, "INT05", "Parameter value unbound."),
     INTERRUPTED_EXCEPTION(2005, "INT07", "Interrupted exception."),
     INCOMPATIBLE_CLIENT_SERVER_JAR(2006, "INT08", "Incompatible jars detected 
between client and server."),
     OUTDATED_JARS(2007, "INT09", "Outdated jars."),
     INDEX_METADATA_NOT_FOUND(2008, "INT10", "Unable to find cached index 
metadata. "),
-    UNKNOWN_ERROR_CODE(2009, "INT11", "Unknown error code"),
-    OPERATION_TIMED_OUT(6000, "TIM01", "Operation timed out", new Factory() {
+    UNKNOWN_ERROR_CODE(2009, "INT11", "Unknown error code."),
+    OPERATION_TIMED_OUT(6000, "TIM01", "Operation timed out.", new Factory() {
         @Override
         public SQLException newException(SQLExceptionInfo info) {
             return new SQLTimeoutException(OPERATION_TIMED_OUT.getMessage(),

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f572fa63/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index d3b36ec..3dfae46 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -210,12 +210,12 @@ public class MutationState implements SQLCloseable {
      * when a data table transaction is started before the create index
      * but completes after it. In this case, we need to rerun the data
      * table transaction after the index creation so that the index rows
-     * are generated. See {@link #addReadFence(PTable)} and TEPHRA-157
+     * are generated. See {@link #addDMLFence(PTable)} and TEPHRA-157
      * for more information.
      * @param dataTable the data table upon which an index is being added
      * @throws SQLException
      */
-    public void commitWriteFence(PTable dataTable) throws SQLException {
+    public void commitDDLFence(PTable dataTable) throws SQLException {
         if (dataTable.isTransactional()) {
             byte[] key = SchemaUtil.getTableKey(dataTable);
             boolean success = false;
@@ -249,12 +249,12 @@ public class MutationState implements SQLCloseable {
     /**
      * Add an entry to the change set representing the DML operation that is 
starting.
      * These entries will not conflict with each other, but they will conflict 
with a
-     * DDL operation of creating an index. See {@link #addReadFence(PTable)} 
and TEPHRA-157
+     * DDL operation of creating an index. See {@link #addDMLFence(PTable)} 
and TEPHRA-157
      * for more information.
      * @param dataTable the table which is doing DML
      * @throws SQLException
      */
-    public void addReadFence(PTable dataTable) throws SQLException {
+    public void addDMLFence(PTable dataTable) throws SQLException {
         if (this.txContext == null) {
             throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.NULL_TRANSACTION_CONTEXT).build().buildException();
         }
@@ -1004,7 +1004,9 @@ public class MutationState implements SQLCloseable {
                     // Keep all mutations we've encountered until a commit or 
rollback.
                     // This is not ideal, but there's not good way to get the 
values back
                     // in the event that we need to replay the commit.
-                    joinMutationState(tableRef, valuesMap, txMutations);
+                    // Copy TableRef so we have the original PTable and know 
when the
+                    // indexes have changed.
+                    joinMutationState(new TableRef(tableRef), valuesMap, 
txMutations);
                 }
                 // Remove batches as we process them
                 if (sendAll) {
@@ -1180,11 +1182,11 @@ public class MutationState implements SQLCloseable {
                             Set<TableRef> txTableRefs = txMutations.keySet();
                             for (TableRef tableRef : txTableRefs) {
                                 PTable dataTable = tableRef.getTable();
-                                addReadFence(dataTable);
+                                addDMLFence(dataTable);
                             }
                             try {
                                 // Only retry if an index was added
-                                retryCommit = wasIndexAdded(txTableRefs);
+                                retryCommit = 
shouldResubmitTransaction(txTableRefs);
                             } catch (SQLException e) {
                                 retryCommit = false;
                                 if (sqlE == null) {
@@ -1214,11 +1216,12 @@ public class MutationState implements SQLCloseable {
      * @return true if indexes were added and false otherwise.
      * @throws SQLException 
      */
-    private boolean wasIndexAdded(Set<TableRef> txTableRefs) throws 
SQLException {
+    private boolean shouldResubmitTransaction(Set<TableRef> txTableRefs) 
throws SQLException {
         if (logger.isInfoEnabled()) logger.info("Checking for index updates as 
of "  + getInitialWritePointer());
         MetaDataClient client = new MetaDataClient(connection);
         PMetaData cache = connection.getMetaDataCache();
-        boolean addedIndexes = false;
+        boolean addedAnyIndexes = false;
+        boolean allImmutableTables = !txTableRefs.isEmpty();
         for (TableRef tableRef : txTableRefs) {
             PTable dataTable = tableRef.getTable();
             List<PTable> oldIndexes;
@@ -1227,20 +1230,24 @@ public class MutationState implements SQLCloseable {
             MetaDataMutationResult result = 
client.updateCache(dataTable.getTenantId(), 
dataTable.getSchemaName().getString(), dataTable.getTableName().getString());
             long timestamp = TransactionUtil.getResolvedTime(connection, 
result);
             tableRef.setTimeStamp(timestamp);
-            if (result.getTable() == null) {
+            PTable updatedDataTable = result.getTable();
+            if (updatedDataTable == null) {
                 throw new 
TableNotFoundException(dataTable.getSchemaName().getString(), 
dataTable.getTableName().getString());
             }
-            tableRef.setTable(result.getTable());
-            if (!addedIndexes) {
+            allImmutableTables |= updatedDataTable.isImmutableRows();
+            tableRef.setTable(updatedDataTable);
+            if (!addedAnyIndexes) {
                 // TODO: in theory we should do a deep equals check here, as 
it's possible
                 // that an index was dropped and recreated with the same name 
but different
                 // indexed/covered columns.
-                addedIndexes = 
(!oldIndexes.equals(result.getTable().getIndexes()));
-                if (logger.isInfoEnabled()) logger.info((addedIndexes ? 
"Updates " : "No updates ") + "as of "  + timestamp + " to " + 
dataTable.getName().getString() + " with indexes " + 
tableRef.getTable().getIndexes());
+                addedAnyIndexes = 
(!oldIndexes.equals(updatedDataTable.getIndexes()));
+                if (logger.isInfoEnabled()) logger.info((addedAnyIndexes ? 
"Updates " : "No updates ") + "as of "  + timestamp + " to " + 
updatedDataTable.getName().getString() + " with indexes " + 
updatedDataTable.getIndexes());
             }
         }
-        if (logger.isInfoEnabled()) logger.info((addedIndexes ? "Updates " : 
"No updates ") + "to indexes as of "  + getInitialWritePointer());
-        return addedIndexes;
+        if (logger.isInfoEnabled()) logger.info((addedAnyIndexes ? "Updates " 
: "No updates ") + "to indexes as of "  + getInitialWritePointer() + " over " + 
(allImmutableTables ? " all immutable tables" : " some mutable tables"));
+        // If all tables are immutable, we know the conflict we got was due to 
our DDL/DML fence.
+        // If any indexes were added, then the conflict might be due to 
DDL/DML fence.
+        return allImmutableTables || addedAnyIndexes;
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f572fa63/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 6bb5722..b54ccd5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -331,7 +331,7 @@ public class PhoenixStatement implements Statement, 
SQLCloseable {
                                 MutationPlan plan = 
stmt.compilePlan(PhoenixStatement.this, Sequence.ValueOp.VALIDATE_SEQUENCE);
                                 if (plan.getTargetRef() != null && 
plan.getTargetRef().getTable() != null && 
plan.getTargetRef().getTable().isTransactional()) {
                                     state.startTransaction();
-                                    
state.addReadFence(plan.getTargetRef().getTable());
+                                    
state.addDMLFence(plan.getTargetRef().getTable());
                                 }
                                 Iterator<TableRef> tableRefs = 
plan.getSourceRefs().iterator();
                                 state.sendUncommitted(tableRefs);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f572fa63/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index d134f08..0b446b3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1074,7 +1074,7 @@ public class MetaDataClient {
 
                         @Override
                         public MutationState execute() throws SQLException {
-                            
connection.getMutationState().commitWriteFence(dataTable);
+                            
connection.getMutationState().commitDDLFence(dataTable);
                             Cell kv = plan.iterator().next().getValue(0);
                             ImmutableBytesWritable tmpPtr = new 
ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), 
kv.getValueLength());
                             // A single Cell will be returned with the 
count(*) - we decode that here

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f572fa63/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
index 35e2f77..8f6e271 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
@@ -36,12 +36,16 @@ public class TableRef {
     private final long lowerBoundTimeStamp;
     private final boolean hasDynamicCols;
 
+    public TableRef(TableRef tableRef) {
+        this(tableRef.alias, tableRef.table, tableRef.upperBoundTimeStamp, 
tableRef.lowerBoundTimeStamp, tableRef.hasDynamicCols);
+    }
+    
     public TableRef(TableRef tableRef, long timeStamp) {
-        this(tableRef.alias, tableRef.table, timeStamp, 
tableRef.hasDynamicCols);
+        this(tableRef.alias, tableRef.table, timeStamp, 
tableRef.lowerBoundTimeStamp, tableRef.hasDynamicCols);
     }
     
     public TableRef(TableRef tableRef, String alias) {
-        this(alias, tableRef.table, tableRef.upperBoundTimeStamp, 
tableRef.hasDynamicCols);
+        this(alias, tableRef.table, tableRef.upperBoundTimeStamp, 
tableRef.lowerBoundTimeStamp, tableRef.hasDynamicCols);
     }
     
     public TableRef(PTable table) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f572fa63/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index 9411549..6d6dcdf 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -217,7 +217,7 @@ public class QueryCompilerTest extends 
BaseConnectionlessQueryTest {
             statement.execute();
             fail();
         } catch (SQLException e) {
-            assertTrue(e.getMessage(), e.getMessage().contains("ERROR 517 
(42895): Invalid not null constraint on non primary key column 
columnName=FOO.PK"));
+            
assertEquals(SQLExceptionCode.INVALID_NOT_NULL_CONSTRAINT.getErrorCode(), 
e.getErrorCode());
         } finally {
             conn.close();
         }

Reply via email to the mailing list.