phoenix git commit: PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client and server when upgraded from 4.6

2016-02-24 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 edd20d57c -> 1cf2518cc


PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client 
and server when upgraded from 4.6


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1cf2518c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1cf2518c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1cf2518c

Branch: refs/heads/4.x-HBase-0.98
Commit: 1cf2518cc577874826f682bd6bf9a37ccef18af3
Parents: edd20d5
Author: James Taylor 
Authored: Wed Feb 24 11:48:16 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 24 12:00:27 2016 -0800

--
 .../phoenix/coprocessor/MetaDataProtocol.java   |  2 +-
 .../query/ConnectionQueryServicesImpl.java  | 35 
 2 files changed, 30 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1cf2518c/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index a704e22..fb9d228 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -73,7 +73,7 @@ public abstract class MetaDataProtocol extends 
MetaDataService {
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0 = 
MIN_TABLE_TIMESTAMP + 7;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0 = 
MIN_TABLE_TIMESTAMP + 8;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0 = 
MIN_TABLE_TIMESTAMP + 9;
-public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 = 
MIN_TABLE_TIMESTAMP + 13;
+public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 = 
MIN_TABLE_TIMESTAMP + 14;
 // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the 
MIN_SYSTEM_TABLE_TIMESTAMP_* constants
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0;
 // TODO: pare this down to minimum, as we don't need duplicates for both 
table and column errors, nor should we need

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1cf2518c/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index ffcbd0f..0f32adf 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2366,17 +2366,20 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 // Add these columns one at a time, each 
with different timestamps so that if folks have
 // run the upgrade code already for a 
snapshot, we'll still enter this block (and do the
 // parts we haven't yet done).
-metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
+metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4,
 
PhoenixDatabaseMetaData.TRANSACTIONAL + " " + 
PBoolean.INSTANCE.getSqlTypeName());
 // Drop old stats table so that new stats 
table is created
 metaConnection = 
dropStatsTable(metaConnection,
-
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1);
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3);
 metaConnection = 
addColumnsIfNotExists(metaConnection,
 
PhoenixDatabaseMetaData.SYSTEM_CATALOG,
-
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0,
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
 
PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " "
 + 
PLong.INSTANCE.getSqlTypeName());

phoenix git commit: PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client and server when upgraded from 4.6

2016-02-24 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.0 8f1eb83b0 -> 76f17a5aa


PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client 
and server when upgraded from 4.6


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/76f17a5a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/76f17a5a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/76f17a5a

Branch: refs/heads/4.x-HBase-1.0
Commit: 76f17a5aa4b393770079d7d334c762b4c7841f59
Parents: 8f1eb83
Author: James Taylor 
Authored: Wed Feb 24 11:48:16 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 24 12:08:24 2016 -0800

--
 .../phoenix/coprocessor/MetaDataProtocol.java   |  2 +-
 .../query/ConnectionQueryServicesImpl.java  | 35 
 2 files changed, 30 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/76f17a5a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index a704e22..fb9d228 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -73,7 +73,7 @@ public abstract class MetaDataProtocol extends 
MetaDataService {
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0 = 
MIN_TABLE_TIMESTAMP + 7;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0 = 
MIN_TABLE_TIMESTAMP + 8;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0 = 
MIN_TABLE_TIMESTAMP + 9;
-public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 = 
MIN_TABLE_TIMESTAMP + 13;
+public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 = 
MIN_TABLE_TIMESTAMP + 14;
 // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the 
MIN_SYSTEM_TABLE_TIMESTAMP_* constants
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0;
 // TODO: pare this down to minimum, as we don't need duplicates for both 
table and column errors, nor should we need

http://git-wip-us.apache.org/repos/asf/phoenix/blob/76f17a5a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 4c48179..1c2e26a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2366,17 +2366,20 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 // Add these columns one at a time, each 
with different timestamps so that if folks have
 // run the upgrade code already for a 
snapshot, we'll still enter this block (and do the
 // parts we haven't yet done).
-metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
+metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4,
 
PhoenixDatabaseMetaData.TRANSACTIONAL + " " + 
PBoolean.INSTANCE.getSqlTypeName());
 // Drop old stats table so that new stats 
table is created
 metaConnection = 
dropStatsTable(metaConnection,
-
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1);
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3);
 metaConnection = 
addColumnsIfNotExists(metaConnection,
 
PhoenixDatabaseMetaData.SYSTEM_CATALOG,
-
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0,
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
 
PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " "
 + 
PLong.INSTANCE.getSqlTypeName());

phoenix git commit: PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client and server when upgraded from 4.6

2016-02-24 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/master 05e6e2a3d -> 1f7b47a5e


PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client 
and server when upgraded from 4.6


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1f7b47a5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1f7b47a5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1f7b47a5

Branch: refs/heads/master
Commit: 1f7b47a5e75fe5f6014b4dcdeb4637d05fd12a43
Parents: 05e6e2a
Author: James Taylor 
Authored: Wed Feb 24 11:48:16 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 24 12:01:21 2016 -0800

--
 .../phoenix/coprocessor/MetaDataProtocol.java   |  2 +-
 .../query/ConnectionQueryServicesImpl.java  | 35 
 2 files changed, 30 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f7b47a5/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index a704e22..fb9d228 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -73,7 +73,7 @@ public abstract class MetaDataProtocol extends 
MetaDataService {
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0 = 
MIN_TABLE_TIMESTAMP + 7;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0 = 
MIN_TABLE_TIMESTAMP + 8;
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0 = 
MIN_TABLE_TIMESTAMP + 9;
-public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 = 
MIN_TABLE_TIMESTAMP + 13;
+public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 = 
MIN_TABLE_TIMESTAMP + 14;
 // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the 
MIN_SYSTEM_TABLE_TIMESTAMP_* constants
 public static final long MIN_SYSTEM_TABLE_TIMESTAMP = 
MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0;
 // TODO: pare this down to minimum, as we don't need duplicates for both 
table and column errors, nor should we need

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f7b47a5/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index d55ab30..d27a4bc 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2370,17 +2370,20 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 // Add these columns one at a time, each 
with different timestamps so that if folks have
 // run the upgrade code already for a 
snapshot, we'll still enter this block (and do the
 // parts we haven't yet done).
-metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
+metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4,
 
PhoenixDatabaseMetaData.TRANSACTIONAL + " " + 
PBoolean.INSTANCE.getSqlTypeName());
 // Drop old stats table so that new stats 
table is created
 metaConnection = 
dropStatsTable(metaConnection,
-
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1);
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3);
 metaConnection = 
addColumnsIfNotExists(metaConnection,
 
PhoenixDatabaseMetaData.SYSTEM_CATALOG,
-
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0,
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
 
PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " "
 + 
PLong.INSTANCE.getSqlTypeName());
-  

[34/50] [abbrv] phoenix git commit: PHOENIX-2119 Do not copy underlying HBase configuration properties when connection properties are supplied

2016-02-24 Thread maryannxue
PHOENIX-2119 Do not copy underlying HBase configuration properties when 
connection properties are supplied


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9e2b4339
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9e2b4339
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9e2b4339

Branch: refs/heads/calcite
Commit: 9e2b4339242588f36a71a3920b36b2e2c7867d2d
Parents: d8e5a73
Author: James Taylor 
Authored: Tue Feb 16 12:55:21 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 17 09:14:12 2016 -0800

--
 .../main/java/org/apache/phoenix/jdbc/PhoenixConnection.java  | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9e2b4339/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 82bf31a..215d815 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -58,8 +58,6 @@ import java.util.concurrent.LinkedBlockingQueue;
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 
-import co.cask.tephra.TransactionContext;
-
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.htrace.Sampler;
@@ -76,7 +74,6 @@ import org.apache.phoenix.iterate.ParallelIteratorFactory;
 import org.apache.phoenix.iterate.TableResultIterator;
 import org.apache.phoenix.iterate.TableResultIteratorFactory;
 import org.apache.phoenix.jdbc.PhoenixStatement.PhoenixStatementParser;
-import org.apache.phoenix.monitoring.GlobalClientMetrics;
 import org.apache.phoenix.parse.PFunction;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.DelegateConnectionQueryServices;
@@ -119,6 +116,8 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
 import com.google.common.collect.Lists;
 
+import co.cask.tephra.TransactionContext;
+
 
 /**
  * 
@@ -304,7 +303,7 @@ public class PhoenixConnection implements Connection, 
MetaDataMutated, SQLClosea
 private static Properties filterKnownNonProperties(Properties info) {
 Properties prunedProperties = info;
 for (String property : PhoenixRuntime.CONNECTION_PROPERTIES) {
-if (info.contains(property)) {
+if (info.containsKey(property)) {
 if (prunedProperties == info) {
 prunedProperties = PropertiesUtil.deepCopy(info);
 }



[04/50] [abbrv] phoenix git commit: PHOENIX-2647 Duplicate results in reverse scan when guideposts are traversed (Ankit Singhal)

2016-02-24 Thread maryannxue
PHOENIX-2647 Duplicate results in reverse scan when guideposts are traversed 
(Ankit Singhal)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b64edb75
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b64edb75
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b64edb75

Branch: refs/heads/calcite
Commit: b64edb75455d56fc2a5086043bdd6fa1064f2ca7
Parents: a82a0ff
Author: Ankit Singhal 
Authored: Fri Feb 5 23:45:04 2016 +0530
Committer: Ankit Singhal 
Committed: Fri Feb 5 23:45:04 2016 +0530

--
 .../apache/phoenix/end2end/ReverseScanIT.java   | 21 +
 .../phoenix/end2end/StatsCollectorIT.java   | 28 +++
 .../java/org/apache/phoenix/util/ScanUtil.java  | 49 ++--
 3 files changed, 64 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b64edb75/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
index 35a8025..2722be1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
@@ -168,6 +168,27 @@ public class ReverseScanIT extends BaseHBaseManagedTimeIT {
 }
 
 @Test
+public void testReverseScanForSpecificRangeInRegion() throws Exception {
+Connection conn;
+ResultSet rs;
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+conn = DriverManager.getConnection(getUrl(), props);
+conn.createStatement()
+.execute("CREATE TABLE T" + " ( k VARCHAR, c1.a bigint,c2.b 
bigint CONSTRAINT pk PRIMARY KEY (k)) ");
+conn.createStatement().execute("upsert into T values ('a',1,3)");
+conn.createStatement().execute("upsert into T values ('b',1,3)");
+conn.createStatement().execute("upsert into T values ('c',1,3)");
+conn.createStatement().execute("upsert into T values ('d',1,3)");
+conn.createStatement().execute("upsert into T values ('e',1,3)");
+conn.commit();
+rs = conn.createStatement().executeQuery("SELECT k FROM T where k>'b' 
and k<'d' order by k desc");
+assertTrue(rs.next());
+assertEquals("c", rs.getString(1));
+assertTrue(!rs.next());
+conn.close();
+}
+
+@Test
 public void testReverseScanIndex() throws Exception {
 String tenantId = getOrganizationId();
 initATableValues(tenantId, getSplitsAtRowKeys(tenantId), getUrl());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b64edb75/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index caba259..4450152 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -123,31 +123,41 @@ public class StatsCollectorIT extends 
StatsCollectorAbstractIT {
 conn.close();
 }
 
-@Test
-public void testNoDuplicatesAfterUpdateStats() throws Throwable {
+private void testNoDuplicatesAfterUpdateStats(String splitKey) throws 
Throwable {
 Connection conn;
 PreparedStatement stmt;
 ResultSet rs;
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 conn = DriverManager.getConnection(getUrl(), props);
 conn.createStatement()
-.execute("CREATE TABLE " + fullTableName +" ( k VARCHAR, c1.a 
bigint,c2.b bigint CONSTRAINT pk PRIMARY KEY (k))" + tableDDLOptions );
-conn.createStatement().execute("upsert into " + fullTableName +" 
values ('abc',1,3)");
-conn.createStatement().execute("upsert into " + fullTableName +" 
values ('def',2,4)");
+.execute("CREATE TABLE " + fullTableName
++ " ( k VARCHAR, c1.a bigint,c2.b bigint CONSTRAINT pk 
PRIMARY KEY (k)) "
++ (splitKey != null ? "split on (" + splitKey + ")" : 
""));
+conn.createStatement().execute("upsert into " + fullTableName + " 
values ('abc',1,3)");
+conn.createStatement().execute("upsert into " + fullTableName + " 
values ('def',2,4)");
 conn.commit();
-// CAll the update statistics query here
 stmt = conn.prepareStatement("UPDATE STATISTICS " + fullTableName);
 stmt.execute();
rs = conn.createStatement().executeQuery(

[35/50] [abbrv] phoenix git commit: PHOENIX-2631 Exception when parsing boundary timestamp values

2016-02-24 Thread maryannxue
PHOENIX-2631 Exception when parsing boundary timestamp values


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/818683aa
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/818683aa
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/818683aa

Branch: refs/heads/calcite
Commit: 818683aac866fe31b48135209158c84942a80fb6
Parents: 9e2b433
Author: James Taylor 
Authored: Tue Feb 16 13:01:45 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 17 09:14:13 2016 -0800

--
 .../end2end/ClientTimeArithmeticQueryIT.java|  11 +-
 .../org/apache/phoenix/end2end/DateTimeIT.java  |   5 +-
 .../apache/phoenix/end2end/DistinctCountIT.java |   2 +-
 .../apache/phoenix/end2end/PercentileIT.java|   2 +-
 .../phoenix/end2end/ProductMetricsIT.java   |   6 +-
 .../phoenix/end2end/RowValueConstructorIT.java  |   4 +-
 .../phoenix/end2end/VariableLengthPKIT.java |   2 +-
 .../end2end/index/IndexExpressionIT.java|   7 +-
 .../apache/phoenix/end2end/index/IndexIT.java   |   5 +-
 .../apache/phoenix/schema/types/PDataType.java  |   3 +
 .../apache/phoenix/schema/types/PTimestamp.java | 407 +++
 .../phoenix/compile/WhereOptimizerTest.java |   5 +-
 .../java/org/apache/phoenix/query/BaseTest.java |   7 +-
 .../org/apache/phoenix/query/QueryPlanTest.java |  24 +-
 .../phoenix/schema/types/PDataTypeTest.java |  60 +++
 .../java/org/apache/phoenix/util/TestUtil.java  |   7 +-
 16 files changed, 344 insertions(+), 213 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/818683aa/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
index e617673..8347370 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
@@ -19,9 +19,9 @@
  */
 package org.apache.phoenix.end2end;
 
+import static org.apache.phoenix.query.QueryConstants.MILLIS_IN_DAY;
 import static org.apache.phoenix.util.TestUtil.B_VALUE;
 import static org.apache.phoenix.util.TestUtil.E_VALUE;
-import static org.apache.phoenix.util.TestUtil.MILLIS_IN_DAY;
 import static org.apache.phoenix.util.TestUtil.ROW1;
 import static org.apache.phoenix.util.TestUtil.ROW2;
 import static org.apache.phoenix.util.TestUtil.ROW3;
@@ -51,7 +51,6 @@ import java.util.Properties;
 
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.TestUtil;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -614,10 +613,10 @@ public class ClientTimeArithmeticQueryIT extends 
BaseQueryIT {
 statement.setDate(3, date);
 statement.execute();
 statement.setString(2, ROW4);
-statement.setDate(3, new Date(date.getTime() + TestUtil.MILLIS_IN_DAY 
- 1));
+statement.setDate(3, new Date(date.getTime() + MILLIS_IN_DAY - 1));
 statement.execute();
 statement.setString(2, ROW6);
-statement.setDate(3, new Date(date.getTime() + TestUtil.MILLIS_IN_DAY 
- 1));
+statement.setDate(3, new Date(date.getTime() + MILLIS_IN_DAY - 1));
 statement.execute();
 statement.setString(2, ROW9);
 statement.setDate(3, date);
@@ -738,7 +737,7 @@ public class ClientTimeArithmeticQueryIT extends 
BaseQueryIT {
   conn = DriverManager.getConnection(getUrl(), props);
   rs = conn.createStatement().executeQuery("SELECT ts + 1 FROM 
time_table");
   assertTrue(rs.next());
-  assertEquals(time.getTime() + 
TestUtil.MILLIS_IN_DAY,rs.getTimestamp(1).getTime());
+  assertEquals(time.getTime() + 
MILLIS_IN_DAY,rs.getTimestamp(1).getTime());
 }
 
 @Test
@@ -772,7 +771,7 @@ public class ClientTimeArithmeticQueryIT extends 
BaseQueryIT {
   conn = DriverManager.getConnection(getUrl(), props);
   rs = conn.createStatement().executeQuery("SELECT ts - 1 FROM 
time_table");
   assertTrue(rs.next());
-  assertEquals(time.getTime() - 
TestUtil.MILLIS_IN_DAY,rs.getTimestamp(1).getTime());
+  assertEquals(time.getTime() - 
MILLIS_IN_DAY,rs.getTimestamp(1).getTime());
 }
  
 @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/818683aa/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java

[27/50] [abbrv] phoenix git commit: PHOENIX-2684 LiteralExpression.getBooleanLiteralExpression should compare with .equals() (Julian Eberius)

2016-02-24 Thread maryannxue
PHOENIX-2684 LiteralExpression.getBooleanLiteralExpression should compare with 
.equals() (Julian Eberius)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/60ef7cd5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/60ef7cd5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/60ef7cd5

Branch: refs/heads/calcite
Commit: 60ef7cd54e26fd1635e503c7d7981ba2cdf4c6fc
Parents: 43b34da
Author: James Taylor 
Authored: Mon Feb 15 09:50:43 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 10:14:58 2016 -0800

--
 .../phoenix/end2end/CompareDecimalToLongIT.java | 241 --
 .../apache/phoenix/end2end/PrimitiveTypeIT.java | 245 +++
 .../phoenix/expression/LiteralExpression.java   |   2 +-
 .../java/org/apache/phoenix/query/BaseTest.java |  14 +-
 4 files changed, 252 insertions(+), 250 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/60ef7cd5/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
deleted file mode 100644
index 3a358c4..000
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CompareDecimalToLongIT.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.Properties;
-
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.junit.Test;
-
-
-public class CompareDecimalToLongIT extends BaseClientManagedTimeIT {
-protected static void initTableValues(byte[][] splits, long ts) throws 
Exception {
-ensureTableCreated(getUrl(),"LongInKeyTest",splits, ts-2);
-
-// Insert all rows at ts
-String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts;
-Connection conn = DriverManager.getConnection(url);
-conn.setAutoCommit(true);
-PreparedStatement stmt = conn.prepareStatement(
-"upsert into " +
-"LongInKeyTest VALUES(?)");
-stmt.setLong(1, 2);
-stmt.execute();
-conn.close();
-}
-
-@Test
-public void testCompareLongGTDecimal() throws Exception {
-long ts = nextTimestamp();
-initTableValues(null, ts);
-String query = "SELECT l FROM LongInKeyTest where l > 1.5";
-Properties props = new Properties();
-props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
-Connection conn = DriverManager.getConnection(getUrl(), props);
-try {
-PreparedStatement statement = conn.prepareStatement(query);
-ResultSet rs = statement.executeQuery();
-assertTrue (rs.next());
-assertEquals(2, rs.getLong(1));
-assertFalse(rs.next());
-} finally {
-conn.close();
-}
-}
-
-@Test
-public void testCompareLongGTEDecimal() throws Exception {
-long ts = nextTimestamp();
-initTableValues(null, ts);
-String query = "SELECT l FROM LongInKeyTest where l >= 1.5";
-Properties props = new Properties();
-props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
-Connection conn = DriverManager.getConnection(getUrl(), props);
-try {
-PreparedStatement statement = conn.prepareStatement(query);
-ResultSet rs = statement.executeQuery();
-/*
- *  Failing because we're not converting the constant to the type 
of the R

[25/50] [abbrv] phoenix git commit: PHOENIX-2221 Option to make data regions not writable when index regions are not available (Alicia Ying Shu, James Taylor)

2016-02-24 Thread maryannxue
PHOENIX-2221 Option to make data regions not writable when index regions are 
not available (Alicia Ying Shu, James Taylor)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e2a6386f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e2a6386f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e2a6386f

Branch: refs/heads/calcite
Commit: e2a6386f3b9343aec74c5f96f0e0124e80b9f8b1
Parents: 6881aef
Author: James Taylor 
Authored: Sun Feb 14 09:06:14 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 00:33:18 2016 -0800

--
 .../end2end/index/MutableIndexFailureIT.java|  31 +-
 .../end2end/index/ReadOnlyIndexFailureIT.java   | 289 +++
 .../apache/phoenix/compile/FromCompiler.java|   2 +-
 .../apache/phoenix/compile/JoinCompiler.java|   2 +-
 .../compile/TupleProjectionCompiler.java|   4 +-
 .../apache/phoenix/compile/UnionCompiler.java   |   2 +-
 .../coprocessor/MetaDataEndpointImpl.java   |  92 +++---
 .../coprocessor/MetaDataRegionObserver.java |  27 +-
 .../coprocessor/generated/PTableProtos.java | 103 ++-
 .../phoenix/exception/SQLExceptionCode.java |   2 +
 .../apache/phoenix/execute/MutationState.java   |  39 ++-
 .../index/write/DelegateIndexFailurePolicy.java |  58 
 .../index/PhoenixIndexFailurePolicy.java|  48 ++-
 .../org/apache/phoenix/query/QueryServices.java |   3 +
 .../phoenix/query/QueryServicesOptions.java |   1 +
 .../apache/phoenix/schema/DelegateTable.java|   5 +
 .../apache/phoenix/schema/MetaDataClient.java   |  38 +--
 .../java/org/apache/phoenix/schema/PTable.java  |   1 +
 .../org/apache/phoenix/schema/PTableImpl.java   |  51 ++--
 .../phoenix/execute/CorrelatePlanTest.java  |   2 +-
 phoenix-protocol/src/main/PTable.proto  |   1 +
 21 files changed, 660 insertions(+), 141 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2a6386f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 5f39515..176c5a0 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -172,7 +172,7 @@ public class MutableIndexFailureIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 TableName indexTable =
 TableName.valueOf(localIndex ? MetaDataUtil
 .getLocalIndexTableName(fullTableName) : 
fullIndexName);
-HBaseAdmin admin = this.getUtility().getHBaseAdmin();
+HBaseAdmin admin = getUtility().getHBaseAdmin();
 HTableDescriptor indexTableDesc = 
admin.getTableDescriptor(indexTable);
 try{
 admin.disableTable(indexTable);
@@ -184,20 +184,10 @@ public class MutableIndexFailureIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 stmt.setString(2, "x2");
 stmt.setString(3, "2");
 stmt.execute();
-if (transactional) {
-try {
-conn.commit();
-fail();
-} catch (SQLException e) {
-conn.rollback();
-}
-}
-else {
-try {
-conn.commit();
-fail();
-} catch (SQLException e) {
-}
+try {
+conn.commit();
+fail();
+} catch (SQLException e) {
 }
 
 // Verify the metadata for index is correct.
@@ -341,9 +331,9 @@ public class MutableIndexFailureIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 // find a RS which doesn't has CATALOG table
 TableName catalogTable = TableName.valueOf("SYSTEM.CATALOG");
 TableName indexTable = TableName.valueOf(fullIndexName);
-final HBaseCluster cluster = this.getUtility().getHBaseCluster();
+final HBaseCluster cluster = getUtility().getHBaseCluster();
 Collection rss = 
cluster.getClusterStatus().getServers();
-HBaseAdmin admin = this.getUtility().getHBaseAdmin();
+HBaseAdmin admin = getUtility().getHBaseAdmin();
 List regions = admin.getTableRegions(catalogTable);
 ServerName catalogRS = 
cluster.getServerHoldingRegion(regions.get(0).getTable(),
 regions.get(0).getRegionName());
@@ -363,7 +353,7 @@ public class MutableIndexFailureIT extends 
BaseOwnClus

[01/50] [abbrv] phoenix git commit: PHOENIX-2649 - GC/OOM during BulkLoad

2016-02-24 Thread maryannxue
Repository: phoenix
Updated Branches:
  refs/heads/calcite 69a2f2437 -> 9ac854ae5


PHOENIX-2649 - GC/OOM during BulkLoad


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/12f6a6f4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/12f6a6f4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/12f6a6f4

Branch: refs/heads/calcite
Commit: 12f6a6f48c4a4739e09f1842d885d53e2e5550e2
Parents: 5c25a72
Author: ravimagham 
Authored: Wed Feb 3 14:15:16 2016 -0800
Committer: ravimagham 
Committed: Wed Feb 3 14:15:16 2016 -0800

--
 .../mapreduce/bulkload/TableRowkeyPair.java | 41 +++-
 .../mapreduce/bulkload/TestTableRowkeyPair.java | 67 
 2 files changed, 75 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/12f6a6f4/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java
index 412226f..e3032be 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java
@@ -84,6 +84,13 @@ public class TableRowkeyPair implements 
WritableComparable {
 WritableUtils.writeString(output,tableName);
 rowkey.write(output);
 }
+
+@Override
+public int hashCode() {
+int result = this.tableName.hashCode();
+result = 31 * result + this.rowkey.hashCode();
+return result;
+}
 
 @Override
 public int compareTo(TableRowkeyPair other) {
@@ -95,40 +102,8 @@ public class TableRowkeyPair implements 
WritableComparable {
 }
 }
 
-/** Comparator optimized for TableRowkeyPair. */
-public static class Comparator extends WritableComparator {
-private BytesWritable.Comparator comparator = new 
BytesWritable.Comparator();
-
-public Comparator() {
-super(TableRowkeyPair.class);
-}
-
-@Override
-public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int 
l2) {
-try {
-int vintL1 = WritableUtils.decodeVIntSize(b1[s1]);
-int vintL2 = WritableUtils.decodeVIntSize(b2[s2]);
-int strL1 = readVInt(b1, s1);
-int strL2 = readVInt(b2, s2);
-int cmp = compareBytes(b1, s1 + vintL1, strL1, b2, s2 + 
vintL2, strL2);
-if (cmp != 0) {
-  return cmp;
-}
-int vintL3 = WritableUtils.decodeVIntSize(b1[s1 + vintL1 + 
strL1]);
-int vintL4 = WritableUtils.decodeVIntSize(b2[s2 + vintL2 + 
strL2]);
-int strL3 = readVInt(b1, s1 + vintL1 + strL1);
-int strL4 = readVInt(b2, s2 + vintL2 + strL2);
-return comparator.compare(b1, s1 + vintL1 + strL1 + vintL3, 
strL3, b2, s2
-+ vintL2 + strL2 + vintL4, strL4);
-
-} catch(Exception ex) {
-throw new IllegalArgumentException(ex);
-}
-}
-}
- 
 static { 
-WritableComparator.define(TableRowkeyPair.class, new Comparator());
+WritableComparator.define(TableRowkeyPair.class, new 
BytesWritable.Comparator());
 }
 
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/12f6a6f4/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java
new file mode 100644
index 000..1fee4bb
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 

[43/50] [abbrv] phoenix git commit: PHOENIX-2696 Delete stale stats for a region if in later run no guidePosts found for that region(Ankit Singhal)

2016-02-24 Thread maryannxue
PHOENIX-2696 Delete stale stats for a region if in later run no guidePosts 
found for that region(Ankit Singhal)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9dd6babd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9dd6babd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9dd6babd

Branch: refs/heads/calcite
Commit: 9dd6babd86186242b55d01afcb857aa02497ea95
Parents: c2cc1be
Author: Ankit Singhal 
Authored: Fri Feb 19 22:28:07 2016 +0530
Committer: Ankit Singhal 
Committed: Fri Feb 19 22:28:07 2016 +0530

--
 .../phoenix/schema/stats/DefaultStatisticsCollector.java | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9dd6babd/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index 96b35f1..cb6f5d4 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -126,6 +126,14 @@ class DefaultStatisticsCollector implements 
StatisticsCollector {
 throws IOException {
 try {
 // update the statistics table
+// Delete statistics for a region if no guidepost is collected for 
that region during UPDATE STATISTICS
+// This will not impact a stats collection of single column family 
during compaction as
+// guidePostsInfoWriterMap cannot be empty in this case.
+if (guidePostsInfoWriterMap.keySet().isEmpty()) {
+for (Store store : region.getStores()) {
+statsTable.deleteStats(region, this, new 
ImmutableBytesPtr(store.getFamily().getName()), mutations);
+}
+}
 for (ImmutableBytesPtr fam : guidePostsInfoWriterMap.keySet()) {
 if (delete) {
 if (logger.isDebugEnabled()) {



[10/50] [abbrv] phoenix git commit: PHOENIX-2655 In MetadataClient creatTableInternal if NEWER_TABLE_FOUND swallow NewerTableAlreadyExistsException if the ifNotExists flag is true

2016-02-24 Thread maryannxue
PHOENIX-2655 In MetadataClient creatTableInternal if NEWER_TABLE_FOUND swallow 
NewerTableAlreadyExistsException if the ifNotExists flag is true


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1c3a86d3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1c3a86d3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1c3a86d3

Branch: refs/heads/calcite
Commit: 1c3a86d3139804c5c2e8a51a8e02bd3ecbd59515
Parents: e5e9144
Author: Thomas D'Silva 
Authored: Mon Feb 8 13:29:55 2016 -0800
Committer: Thomas D'Silva 
Committed: Mon Feb 8 14:38:47 2016 -0800

--
 .../src/main/java/org/apache/phoenix/schema/MetaDataClient.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c3a86d3/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index ac2062a..0456335 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -2158,7 +2158,9 @@ public class MetaDataClient {
 case NEWER_TABLE_FOUND:
 // Add table to ConnectionQueryServices so it's cached, but 
don't add
 // it to this connection as we can't see it.
-throw new NewerTableAlreadyExistsException(schemaName, 
tableName, result.getTable());
+if (!statement.ifNotExists()) {
+throw new NewerTableAlreadyExistsException(schemaName, 
tableName, result.getTable());
+}
 case UNALLOWED_TABLE_MUTATION:
 throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
 
.setSchemaName(schemaName).setTableName(tableName).build().buildException();



[40/50] [abbrv] phoenix git commit: PHOENIX-2691 Exception while unpacking resultset containing VARCHAR ARRAY of unspecified length

2016-02-24 Thread maryannxue
PHOENIX-2691 Exception while unpacking resultset containing VARCHAR ARRAY of 
unspecified length


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cac03056
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cac03056
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cac03056

Branch: refs/heads/calcite
Commit: cac03056578170a82ba812aa4648e0e5b1a1bbb6
Parents: 45a9d67
Author: James Taylor 
Authored: Thu Feb 18 14:59:29 2016 -0800
Committer: James Taylor 
Committed: Thu Feb 18 15:14:48 2016 -0800

--
 .../apache/phoenix/end2end/GroupByCaseIT.java   | 35 +
 .../apache/phoenix/compile/GroupByCompiler.java | 74 
 .../phoenix/exception/SQLExceptionCode.java |  5 +-
 .../java/org/apache/phoenix/util/IndexUtil.java |  6 +-
 .../phoenix/compile/QueryCompilerTest.java  | 36 ++
 5 files changed, 122 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cac03056/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
index 0f1568c..172f9f7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
@@ -226,4 +226,39 @@ public class GroupByCaseIT extends BaseHBaseManagedTimeIT {
 conn.close();
 }
 
+
+@Test
+public void testGroupByArray() throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.createStatement().execute("CREATE TABLE test1(\n" + 
+"  a VARCHAR NOT NULL,\n" + 
+"  b VARCHAR,\n" + 
+"  c INTEGER,\n" + 
+"  d VARCHAR,\n" + 
+"  e VARCHAR ARRAY,\n" + 
+"  f BIGINT,\n" + 
+"  g BIGINT,\n" + 
+"  CONSTRAINT pk PRIMARY KEY(a)\n" + 
+")");
+conn.createStatement().execute("UPSERT INTO test1 VALUES('1', 'val', 
100, 'a', ARRAY ['b'], 1, 2)");
+conn.createStatement().execute("UPSERT INTO test1 VALUES('2', 'val', 
100, 'a', ARRAY ['b'], 3, 4)");
+conn.createStatement().execute("UPSERT INTO test1 VALUES('3', 'val', 
100, 'a', ARRAY ['b','c'], 5, 6)");
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery("SELECT c, SUM(f + 
g) AS sumone, d, e\n" + 
+"FROM test1\n" + 
+"WHERE b = 'val'\n" + 
+"  AND a IN ('1','2','3')\n" + 
+"GROUP BY c, d, e\n" + 
+"ORDER BY sumone DESC");
+assertTrue(rs.next());
+assertEquals(100, rs.getInt(1));
+assertEquals(11, rs.getLong(2));
+assertTrue(rs.next());
+assertEquals(100, rs.getInt(1));
+assertEquals(10, rs.getLong(2));
+assertFalse(rs.next());
+conn.close();
+}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cac03056/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
index 7d9df02..85478bf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
@@ -38,8 +38,8 @@ import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.schema.AmbiguousColumnException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.schema.types.PDecimal;
-import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.schema.types.PVarbinary;
+import org.apache.phoenix.util.IndexUtil;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
@@ -217,34 +217,53 @@ public class GroupByCompiler {
 public int compare(Pair gb1, 
Pair gb2) {
 Expression e1 = gb1.getSecond();
 Expression e2 = gb2.getSecond();
-boolean isFixed1 = e1.getDataType().isFixedWidth();
-boolean isFixed2 = e2.getDataType().isFixedWidth();
+PDataType t1 = e1.getDataType();
+PDataType t2 = e2.getDataType();
+boolean isFixed1 = t1.isFixedWidth();
+ 

[24/50] [abbrv] phoenix git commit: PHOENIX-2221 Option to make data regions not writable when index regions are not available (Alicia Ying Shu, James Taylor)

2016-02-24 Thread maryannxue
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2a6386f/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index f5c9295..abd31c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -66,6 +66,8 @@ import org.apache.phoenix.util.SizedUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TrustedByteArrayOutputStream;
 
+import co.cask.tephra.TxConstants;
+
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ArrayListMultimap;
@@ -78,8 +80,6 @@ import com.google.common.collect.Maps;
 import com.google.protobuf.HBaseZeroCopyByteString;
 import com.sun.istack.NotNull;
 
-import co.cask.tephra.TxConstants;
-
 /**
  *
  * Base class for PTable implementors.  Provides abstraction for
@@ -101,6 +101,7 @@ public class PTableImpl implements PTable {
 private PIndexState state;
 private long sequenceNumber;
 private long timeStamp;
+private long indexDisableTimestamp;
 // Have MultiMap for String->PColumn (may need family qualifier)
 private List pkColumns;
 private List allColumns;
@@ -207,7 +208,7 @@ public class PTableImpl implements PTable {
 table.getSequenceNumber(), table.getPKName(), 
table.getBucketNum(), getColumnsToClone(table), parentSchemaName, 
table.getParentTableName(),
 indexes, table.isImmutableRows(), table.getPhysicalNames(), 
table.getDefaultFamilyName(), viewStatement,
 table.isWALDisabled(), table.isMultiTenant(), 
table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), 
table.getIndexType(),
-table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency());
+table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
 }
 
 public static PTableImpl makePTable(PTable table, List columns) 
throws SQLException {
@@ -216,7 +217,7 @@ public class PTableImpl implements PTable {
 table.getSequenceNumber(), table.getPKName(), 
table.getBucketNum(), columns, table.getParentSchemaName(), 
table.getParentTableName(),
 table.getIndexes(), table.isImmutableRows(), 
table.getPhysicalNames(), table.getDefaultFamilyName(), 
table.getViewStatement(),
 table.isWALDisabled(), table.isMultiTenant(), 
table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), 
table.getIndexType(),
-table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency());
+table.getTableStats(), table.getBaseColumnCount(), 
table.rowKeyOrderOptimizable(), table.isTransactional(), 
table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp());
 }
 
 public static PTableImpl makePTable(PTable table, long timeStamp, long 
sequenceNumber, List columns) throws SQLException {
@@ -225,7 +226,7 @@ public class PTableImpl implements PTable {
 sequenceNumber, table.getPKName(), table.getBucketNum(), 
columns, table.getParentSchemaName(), table.getParentTableName(), 
table.getIndexes(),
 table.isImmutableRows(), table.getPhysicalNames(), 
table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(),
 table.isMultiTenant(), table.getStoreNulls(), 
table.getViewType(), table.getViewIndexId(), table.getIndexType(), 
table.getTableStats(),
-table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), 
table.isTransactional(), table.getUpdateCacheFrequency());
+table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), 
table.isTransactional(), table.getUpdateCacheFrequency(), 
table.getIndexDisableTimestamp());
 }
 
 public static PTableImpl makePTable(PTable table, long timeStamp, long 
sequenceNumber, List columns, boolean isImmutableRows) throws 
SQLException {
@@ -234,7 +235,7 @@ public class PTableImpl implements PTable {
 sequenceNumber, table.getPKName(), table.getBucketNum(), 
columns, table.getParentSchemaName(), table.getParentTableName(),
 table.getIndexes(), isImmutableRows, table.getPhysicalNames(), 
table.getDefaultFamilyName(), table.getViewStatement(),
 table.isWALDisabled(), table.isMultiTenant(), 
table.getStoreNulls(), table.getViewType(), table.getViewIndexId(),
-table.getIndexType(), table.getTableStats(), 
table.getBaseColumnCoun

[03/50] [abbrv] phoenix git commit: PHOENIX-2649 - GC/OOM during BulkLoad (Sergey Soldatov)

2016-02-24 Thread maryannxue
PHOENIX-2649 - GC/OOM during BulkLoad (Sergey Soldatov)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a82a0ff6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a82a0ff6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a82a0ff6

Branch: refs/heads/calcite
Commit: a82a0ff608f90c470a1290296f8ed08243956fc9
Parents: 7972422
Author: ravimagham 
Authored: Thu Feb 4 14:38:43 2016 -0800
Committer: ravimagham 
Committed: Thu Feb 4 14:38:43 2016 -0800

--
 .../mapreduce/bulkload/TableRowkeyPair.java | 38 +---
 .../mapreduce/bulkload/TestTableRowkeyPair.java |  6 ++--
 2 files changed, 37 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a82a0ff6/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java
index e3032be..ac80341 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java
@@ -22,7 +22,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.WritableUtils;
@@ -30,6 +30,7 @@ import org.apache.hadoop.io.WritableUtils;
 import com.google.common.base.Preconditions;
 
 
+
 /**
  * A WritableComparable to hold the table name and the rowkey.
  */
@@ -101,9 +102,38 @@ public class TableRowkeyPair implements 
WritableComparable {
 return this.tableName.compareTo(otherTableName);
 }
 }
-
-static { 
-WritableComparator.define(TableRowkeyPair.class, new 
BytesWritable.Comparator());
+
+/** Comparator for TableRowkeyPair. */
+public static class Comparator extends WritableComparator {
+
+public Comparator() {
+super(TableRowkeyPair.class);
+}
+
+@Override
+public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int 
l2) {
+try {
+// Compare table names
+int strL1 = readInt(b1, s1);
+int strL2 = readInt(b2, s2);
+int cmp = compareBytes(b1, s1 + Bytes.SIZEOF_INT, strL1, b2, 
s2 + Bytes.SIZEOF_INT, strL2);
+if (cmp != 0) {
+return cmp;
+}
+// Compare row keys
+int strL3 = readInt(b1, s1 + Bytes.SIZEOF_INT + strL1);
+int strL4 = readInt(b2, s2 + Bytes.SIZEOF_INT + strL2);
+int i = compareBytes(b1, s1 + Bytes.SIZEOF_INT*2 + strL1, 
strL3, b2, s2
++ Bytes.SIZEOF_INT*2 + strL2, strL4);
+return i;
+} catch(Exception ex) {
+throw new IllegalArgumentException(ex);
+}
+}
+}
+
+static {
+WritableComparator.define(TableRowkeyPair.class, new Comparator());
 }
 
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a82a0ff6/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java
index 1fee4bb..2a29c00 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java
@@ -23,7 +23,6 @@ import java.io.IOException;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.BytesWritable;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -38,8 +37,9 @@ public class TestTableRowkeyPair {
 testsRowsKeys("first", "aa", "first", "ab", -1);
 testsRowsKeys("second", "aa", "first", "aa", 1);
 testsRowsKeys("first", "aa", "first", "aaa", -1);
+testsRowsKeys("first","bb", "first", "", 1);
 }
-
+
 private void testsRowsKeys(String aTable, String akey, String bTable, 
String bkey, int expectedSignum) throws IOException {
 
 final ImmutableBytesWritable arowkey = new 
ImmutableBytesWritable(Bytes.toBytes(akey));

[47/50] [abbrv] phoenix git commit: PHOENIX-2702 Show estimate rows and bytes touched in explain plan.

2016-02-24 Thread maryannxue
PHOENIX-2702 Show estimate rows and bytes touched in explain plan.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e4acd0cd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e4acd0cd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e4acd0cd

Branch: refs/heads/calcite
Commit: e4acd0cda4ad8f12db8331af39a9e32f4b81c223
Parents: d9d66ae
Author: Lars Hofhansl 
Authored: Sun Feb 21 22:32:09 2016 -0800
Committer: Lars Hofhansl 
Committed: Sun Feb 21 22:32:09 2016 -0800

--
 .../apache/phoenix/iterate/BaseResultIterators.java   | 14 +-
 .../java/org/apache/phoenix/iterate/ExplainTable.java |  2 +-
 .../phoenix/schema/stats/GuidePostsInfoBuilder.java   |  2 +-
 3 files changed, 15 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e4acd0cd/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 01b790a..fc3edbe 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -120,6 +120,8 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 private final ParallelScanGrouper scanGrouper;
 // TODO: too much nesting here - breakup into new classes.
 private final List 
allFutures;
+private long estimatedRows;
+private long estimatedSize;
 
 static final Function TO_KEY_RANGE = new 
Function() {
 @Override
@@ -558,6 +560,8 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 while (guideIndex < gpsSize && 
(currentGuidePost.compareTo(endKey) <= 0 || endKey.length == 0)) {
 Scan newScan = scanRanges.intersectScan(scan, 
currentKeyBytes, currentGuidePostBytes, keyOffset,
 false);
+estimatedRows += gps.getRowCounts().get(guideIndex);
+estimatedSize += gps.getByteCounts().get(guideIndex);
 scans = addNewScan(parallelScans, scans, newScan, 
currentGuidePostBytes, false, regionLocation);
 currentKeyBytes = currentGuidePost.copyBytes();
 currentGuidePost = PrefixByteCodec.decode(decoder, input);
@@ -851,7 +855,15 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB,
 QueryServicesOptions.DEFAULT_EXPLAIN_CHUNK_COUNT);
 StringBuilder buf = new StringBuilder();
-buf.append("CLIENT " + (displayChunkCount ? (this.splits.size() + 
"-CHUNK ") : "") + getName() + " " + size() + "-WAY ");
+buf.append("CLIENT ");
+if (displayChunkCount) {
+buf.append(this.splits.size()).append("-CHUNK ");
+if (estimatedRows > 0) {
+buf.append(estimatedRows).append(" ROWS ");
+buf.append(estimatedSize).append(" BYTES ");
+}
+}
+buf.append(getName()).append(" ").append(size()).append("-WAY ");
 explain(buf.toString(),planSteps);
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e4acd0cd/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
index b319914..1b623a4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
@@ -120,7 +120,7 @@ public abstract class ExplainTable {
 } else {
 explainSkipScan(buf);
 }
-buf.append("OVER " + 
tableRef.getTable().getPhysicalName().getString());
+buf.append("OVER 
").append(tableRef.getTable().getPhysicalName().getString());
 if (!scanRanges.isPointLookup()) {
 appendKeyRanges(buf);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e4acd0cd/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfoBuilder.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfoBuilder.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfoBuilder.java
index c85b1d6..2133349 100644
--- 
a/phoenix-core/s

[38/50] [abbrv] phoenix git commit: PHOENIX-2676 Cannot support join operations in scans with limit

2016-02-24 Thread maryannxue
PHOENIX-2676 Cannot support join operations in scans with limit


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7d90e882
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7d90e882
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7d90e882

Branch: refs/heads/calcite
Commit: 7d90e88261dfc4ef30544ad7d92fa8c66653df56
Parents: 5127a65
Author: James Taylor 
Authored: Wed Feb 17 12:34:48 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 17 12:34:48 2016 -0800

--
 .../org/apache/phoenix/coprocessor/HashJoinRegionScanner.java   | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7d90e882/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
index 8f64b55..bd9c5ec 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.NoLimitScannerContext;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -274,7 +273,7 @@ public class HashJoinRegionScanner implements RegionScanner 
{
 try {
 while (shouldAdvance()) {
 hasMore = scanner.nextRaw(result, scannerContext);
-processResults(result, scannerContext != 
NoLimitScannerContext.getInstance());
+processResults(result, false); // TODO detect if limit used 
here
 result.clear();
 }
 
@@ -316,7 +315,7 @@ public class HashJoinRegionScanner implements RegionScanner 
{
 try {
 while (shouldAdvance()) {
 hasMore = scanner.next(result, scannerContext);
-processResults(result, scannerContext != 
NoLimitScannerContext.getInstance());
+processResults(result, false); // TODO detect if limit used 
here
 result.clear();
 }
 



[05/50] [abbrv] phoenix git commit: PHOENIX-2647 Fix errors in 2 tests related to DDL issues for transaction (addendum)

2016-02-24 Thread maryannxue
PHOENIX-2647 Fix errors in 2 tests related to DDL issues for transaction 
(addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fa58fc5f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fa58fc5f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fa58fc5f

Branch: refs/heads/calcite
Commit: fa58fc5fccd11cbeb14e701a59122b3cba77d67e
Parents: b64edb7
Author: Ankit Singhal 
Authored: Sat Feb 6 01:11:22 2016 +0530
Committer: Ankit Singhal 
Committed: Sat Feb 6 01:11:22 2016 +0530

--
 .../src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa58fc5f/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index 4450152..e72f41f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -131,8 +131,8 @@ public class StatsCollectorIT extends 
StatsCollectorAbstractIT {
 conn = DriverManager.getConnection(getUrl(), props);
 conn.createStatement()
 .execute("CREATE TABLE " + fullTableName
-+ " ( k VARCHAR, c1.a bigint,c2.b bigint CONSTRAINT pk 
PRIMARY KEY (k)) "
-+ (splitKey != null ? "split on (" + splitKey + ")" : 
""));
++ " ( k VARCHAR, c1.a bigint,c2.b bigint CONSTRAINT pk 
PRIMARY KEY (k))"+ tableDDLOptions
++ (splitKey != null ? " split on (" + splitKey + ")" : 
"") );
 conn.createStatement().execute("upsert into " + fullTableName + " 
values ('abc',1,3)");
 conn.createStatement().execute("upsert into " + fullTableName + " 
values ('def',2,4)");
 conn.commit();



[50/50] [abbrv] phoenix git commit: 1. Expose and correct (new issue to be opened) the byte estimate interface from ResultIterators and apply it in costing; 2. Remove workaround for PHOENIX-2647 and

2016-02-24 Thread maryannxue
1. Expose and correct (new issue to be opened) the byte estimate interface from 
ResultIterators and apply it in costing; 2. Remove workaround for PHOENIX-2647 
and verify the tests all work; 3. Apply temporary fix for PHOENIX-2712


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9ac854ae
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9ac854ae
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9ac854ae

Branch: refs/heads/calcite
Commit: 9ac854ae5335a00d2a61ec6f1929ebc64e7137e2
Parents: bc4b891
Author: maryannxue 
Authored: Wed Feb 24 16:12:25 2016 -0500
Committer: maryannxue 
Committed: Wed Feb 24 16:12:25 2016 -0500

--
 .../apache/phoenix/calcite/BaseCalciteIT.java   |  52 --
 .../org/apache/phoenix/calcite/CalciteIT.java   | 163 ++-
 .../apache/phoenix/calcite/CalciteIndexIT.java  |  36 ++--
 .../phoenix/calcite/rel/PhoenixTableScan.java   |  74 +
 .../org/apache/phoenix/execute/ScanPlan.java|   5 +
 .../phoenix/iterate/BaseResultIterators.java|  12 +-
 .../apache/phoenix/iterate/ResultIterators.java |   1 +
 .../phoenix/iterate/UnionResultIterators.java   |   5 +
 .../apache/phoenix/schema/MetaDataClient.java   |   3 +
 .../iterate/AggregateResultScannerTest.java |   5 +
 .../iterate/ConcatResultIteratorTest.java   |  10 ++
 .../iterate/MergeSortResultIteratorTest.java|  15 ++
 12 files changed, 204 insertions(+), 177 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ac854ae/phoenix-core/src/it/java/org/apache/phoenix/calcite/BaseCalciteIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/calcite/BaseCalciteIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/calcite/BaseCalciteIT.java
index 35c46e7..65a9c6e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/calcite/BaseCalciteIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/calcite/BaseCalciteIT.java
@@ -168,14 +168,28 @@ public class BaseCalciteIT extends 
BaseClientManagedTimeIT {
 start.close();
 }
 
-public Sql resultIs(boolean ordered, Object[][] expected) throws 
SQLException {
+public Sql resultIs(Object[][] expected) throws SQLException {
 final Statement statement = 
start.getConnection().createStatement();
 final ResultSet resultSet = statement.executeQuery(sql);
-if (ordered) {
-checkResultOrdered(resultSet, expected);
-} else {
-checkResultUnordered(resultSet, expected);
-}
+checkResultOrdered(resultSet, expected);
+resultSet.close();
+statement.close();
+return this;
+}
+
+public Sql resultIs(int orderedCount, Object[][] expected) throws 
SQLException {
+final Statement statement = 
start.getConnection().createStatement();
+final ResultSet resultSet = statement.executeQuery(sql);
+checkResultUnordered(resultSet, expected, orderedCount, null);
+resultSet.close();
+statement.close();
+return this;
+}
+
+public Sql resultIsSomeOf(int count, Object[][] expected) throws 
SQLException {
+final Statement statement = 
start.getConnection().createStatement();
+final ResultSet resultSet = statement.executeQuery(sql);
+checkResultUnordered(resultSet, expected, 0, count);
 resultSet.close();
 statement.close();
 return this;
@@ -198,21 +212,28 @@ public class BaseCalciteIT extends 
BaseClientManagedTimeIT {
 assertFalse("Got more rows than expected.", resultSet.next()); 
   
 }
 
-private void checkResultUnordered(ResultSet resultSet, Object[][] 
expected) throws SQLException {
+private void checkResultUnordered(ResultSet resultSet, Object[][] 
expected, int orderedCount, Integer checkContains) throws SQLException {
 List> expectedResults = Lists.newArrayList();
 List> actualResults = Lists.newArrayList();
 List> errorResults = Lists.newArrayList();
 int columnCount = expected.length > 0 ? expected[0].length : 0;
 for (Object[] e : expected) {
 List row = Lists.newArrayList();
-for (Object o : e) {
-row.add(canonicalize(o));
+for (int i = orderedCount; i < e.length; i++) {
+row.add(canonicalize(e[i]));
 }
 expectedResults.add(row);
 }
 while (resultSet.next()) {
+// check the ordered part
+Object[] row = expected[actualResults.s

[13/50] [abbrv] phoenix git commit: PHOENIX-2665 index split while running group by query is returning duplicate results(Rajeshbabu)

2016-02-24 Thread maryannxue
PHOENIX-2665 index split while running group by query is returning duplicate 
results(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c48fee04
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c48fee04
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c48fee04

Branch: refs/heads/calcite
Commit: c48fee04e75fc9d08af981f1a2cc257e6cecdbdc
Parents: c485a40
Author: Rajeshbabu Chintaguntla 
Authored: Thu Feb 11 02:38:48 2016 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Thu Feb 11 02:38:48 2016 +0530

--
 .../java/org/apache/phoenix/compile/ScanRanges.java |  2 ++
 .../phoenix/coprocessor/BaseScannerRegionObserver.java  |  4 +++-
 .../org/apache/phoenix/iterate/BaseResultIterators.java | 12 +++-
 3 files changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c48fee04/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
index 4d343f3..719970a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.compile;
 
+import static 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.SCAN_ACTUAL_START_ROW;
 import static 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.STARTKEY_OFFSET;
 
 import java.io.IOException;
@@ -384,6 +385,7 @@ public class ScanRanges {
 if (scanStopKey.length > 0 && Bytes.compareTo(scanStartKey, 
scanStopKey) >= 0) { 
 return null; 
 }
+newScan.setAttribute(SCAN_ACTUAL_START_ROW, scanStartKey);
 newScan.setStartRow(scanStartKey);
 newScan.setStopRow(scanStopKey);
 if(keyOffset > 0) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c48fee04/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index a363459..9487b36 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -97,6 +97,7 @@ abstract public class BaseScannerRegionObserver extends 
BaseRegionObserver {
 public static final String RUN_UPDATE_STATS_ASYNC_ATTRIB = 
"_RunUpdateStatsAsync";
 public static final String SKIP_REGION_BOUNDARY_CHECK = 
"_SKIP_REGION_BOUNDARY_CHECK";
 public static final String TX_SCN = "_TxScn";
+public static final String SCAN_ACTUAL_START_ROW = "_ScanActualStartRow";
 
 /**
  * Attribute name used to pass custom annotations in Scans and Mutations 
(later). Custom annotations
@@ -137,7 +138,8 @@ abstract public class BaseScannerRegionObserver extends 
BaseRegionObserver {
 Bytes.compareTo(upperExclusiveRegionKey, 
expectedUpperRegionKey) != 0;
 } else {
 isStaleRegionBoundaries = Bytes.compareTo(lowerInclusiveScanKey, 
lowerInclusiveRegionKey) < 0 ||
-( Bytes.compareTo(upperExclusiveScanKey, 
upperExclusiveRegionKey) > 0 && upperExclusiveRegionKey.length != 0);
+( Bytes.compareTo(upperExclusiveScanKey, 
upperExclusiveRegionKey) > 0 && upperExclusiveRegionKey.length != 0) ||
+(upperExclusiveRegionKey.length != 0 && 
upperExclusiveScanKey.length == 0);
 }
 if (isStaleRegionBoundaries) {
 Exception cause = new 
StaleRegionBoundaryCacheException(region.getRegionInfo().getTable().getNameAsString());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c48fee04/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index b3235e2..3a3d1f2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -21,6 +21,7 @@ import static 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.EXPECTED_
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_FAILED_QUER

[41/50] [abbrv] phoenix git commit: PHOENIX-2666 Performance regression: Aggregate query with filter on table with multiple column families

2016-02-24 Thread maryannxue
PHOENIX-2666 Performance regression: Aggregate query with filter on table with 
multiple column families


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/28a8b802
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/28a8b802
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/28a8b802

Branch: refs/heads/calcite
Commit: 28a8b802c2a32f9735bf187f08ef0a9e33baf2dd
Parents: cac0305
Author: James Taylor 
Authored: Thu Feb 18 22:16:51 2016 -0800
Committer: James Taylor 
Committed: Thu Feb 18 22:16:51 2016 -0800

--
 .../org/apache/phoenix/end2end/DeleteIT.java|   3 +-
 .../StatsCollectorWithSplitsAndMultiCFIT.java   |   3 +-
 .../apache/phoenix/compile/DeleteCompiler.java  |   6 +-
 .../apache/phoenix/compile/UpsertCompiler.java  |   7 +-
 .../phoenix/filter/ColumnProjectionFilter.java  |   2 +
 .../phoenix/iterate/BaseResultIterators.java| 232 +++
 .../apache/phoenix/iterate/ExplainTable.java|  28 +--
 .../phoenix/compile/QueryCompilerTest.java  |  29 +++
 8 files changed, 189 insertions(+), 121 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/28a8b802/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
index 745c730..6b4eead 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
@@ -205,7 +205,8 @@ public class DeleteIT extends BaseHBaseManagedTimeIT {
 deleteStmt = "DELETE FROM IntIntKeyTest WHERE j IS NULL";
 stmt = conn.prepareStatement(deleteStmt);
 assertIndexUsed(conn, deleteStmt, indexName, createIndex);
-stmt.execute();
+int deleteCount = stmt.executeUpdate();
+assertEquals(3, deleteCount);
 if (!autoCommit) {
 conn.commit();
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/28a8b802/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
index 13cd54c..d922ad9 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
@@ -111,10 +111,9 @@ public class StatsCollectorWithSplitsAndMultiCFIT extends 
StatsCollectorAbstract
 
 rs = conn.createStatement().executeQuery(
 "SELECT 
COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH),COUNT(*) from 
SYSTEM.STATS where PHYSICAL_NAME = '"
-+ STATS_TEST_TABLE_NAME_NEW + "' GROUP BY 
COLUMN_FAMILY");
++ STATS_TEST_TABLE_NAME_NEW + "' GROUP BY 
COLUMN_FAMILY ORDER BY COLUMN_FAMILY");
 
 assertTrue(rs.next());
-assertTrue(rs.next());
 assertEquals("A", rs.getString(1));
 assertEquals(25, rs.getInt(2));
 assertEquals(12420, rs.getInt(3));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/28a8b802/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 4c41f82..8e9e1de 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -508,7 +508,11 @@ public class DeleteCompiler {
 // The coprocessor will delete each row returned from the scan
 // Ignoring ORDER BY, since with auto commit on and no limit 
makes no difference
 SelectStatement aggSelect = 
SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
-final RowProjector projector = 
ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
+RowProjector projectorToBe = 
ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
+if (plan.getProjector().projectEveryRow()) {
+projectorToBe = new RowProjector(projectorToBe,true);
+}
+final RowProjector projector = projectorToBe;
 final QueryPlan aggPlan 

[19/50] [abbrv] phoenix git commit: PHOENIX-2674 PhoenixMapReduceUtil#setInput doesn't honor condition clause

2016-02-24 Thread maryannxue
PHOENIX-2674 PhoenixMapReduceUtil#setInput doesn't honor condition clause

Setting the condition in the PhoenixMapReduceUtil,
as well as some slight cleanup for duplicate code
in setInput(). Adding a test that covers mapreduce
with and without a condition.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8ece81b5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8ece81b5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8ece81b5

Branch: refs/heads/calcite
Commit: 8ece81b5522df3e6bd9dfdb3112e101215bb49f1
Parents: 0c1fd3a
Author: Jesse Yates 
Authored: Wed Feb 10 12:46:47 2016 -0800
Committer: Jesse Yates 
Committed: Fri Feb 12 12:15:42 2016 -0800

--
 .../org/apache/phoenix/end2end/MapReduceIT.java | 230 +++
 .../mapreduce/util/PhoenixMapReduceUtil.java|  65 +++---
 2 files changed, 264 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ece81b5/phoenix-core/src/it/java/org/apache/phoenix/end2end/MapReduceIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MapReduceIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MapReduceIT.java
new file mode 100644
index 000..f030701
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MapReduceIT.java
@@ -0,0 +1,230 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DoubleWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.apache.phoenix.mapreduce.PhoenixOutputFormat;
+import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PhoenixArray;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.sql.*;
+
+import static org.junit.Assert.*;
+
+/**
+ * Test that our MapReduce basic tools work as expected
+ */
+public class MapReduceIT extends BaseHBaseManagedTimeIT {
+
+private static final String STOCK_TABLE_NAME = "stock";
+private static final String STOCK_STATS_TABLE_NAME = "stock_stats";
+private static final String STOCK_NAME = "STOCK_NAME";
+private static final String RECORDING_YEAR = "RECORDING_YEAR";
+private static final String RECORDINGS_QUARTER = "RECORDINGS_QUARTER";
+private static final String CREATE_STOCK_TABLE = "CREATE TABLE IF NOT 
EXISTS " + STOCK_TABLE_NAME + " ( " +
+STOCK_NAME + " VARCHAR NOT NULL ," + RECORDING_YEAR + " INTEGER 
NOT  NULL, " + RECORDINGS_QUARTER +
+" DOUBLE array[] CONSTRAINT pk PRIMARY KEY (" + STOCK_NAME + " , " 
+ RECORDING_YEAR + "))";
+
+private static final String MAX_RECORDING = "MAX_RECORDING";
+private static final String CREATE_STOCK_STATS_TABLE =
+"CREATE TABLE IF NOT EXISTS " + STOCK_STATS_TABLE_NAME + "(" + 
STOCK_NAME + " VARCHAR NOT NULL , "
++ MAX_RECORDING + " DOUBLE CONSTRAINT pk PRIMARY KEY (" + 
STOCK_NAME + "))";
+private static final String UPSERT = "UPSERT into " + STOCK_TABLE_NAME + " 
values (?, ?, ?)";
+
+@Before
+public void setupTables() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute(CREATE_STOCK_TABLE);
+conn.createStatement().execute(CREATE_STOCK_STATS_TABLE);
+conn.commit();
+}
+
+@Test
+public void testNoConditionsOnSelect() throws Exception {
+final Configuration conf = getUtility().getConfiguration();
+Job job = Job.getInstance(conf);
+PhoenixMapReduceUtil.setInput(job, Stoc

[16/50] [abbrv] phoenix git commit: PHOENIX-2658 When using QueryRunner API UNION ALL queries fail with NPE (Alicia Ying Shu)

2016-02-24 Thread maryannxue
PHOENIX-2658 When using QueryRunner API UNION ALL queries fail with NPE (Alicia 
Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0c21539c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0c21539c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0c21539c

Branch: refs/heads/calcite
Commit: 0c21539cc331b8d6ca144604cf899068ad74fb25
Parents: 18f7a69
Author: James Taylor 
Authored: Thu Feb 11 20:10:23 2016 -0800
Committer: James Taylor 
Committed: Thu Feb 11 20:14:32 2016 -0800

--
 .../org/apache/phoenix/end2end/UnionAllIT.java  | 49 +++-
 .../apache/phoenix/compile/QueryCompiler.java   |  3 +-
 2 files changed, 50 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0c21539c/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java
index 6531129..b391dcc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java
@@ -40,7 +40,6 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-
 public class UnionAllIT extends BaseOwnClusterHBaseManagedTimeIT {
 
 @BeforeClass
@@ -679,4 +678,52 @@ public class UnionAllIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 conn.close();
 }
 }
+
+@Test
+public void testParameterMetaDataNotNull() throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+
+String ddl = "CREATE TABLE test_table " +
+"  (a_string varchar not null, col1 integer" +
+"  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+createTestTable(getUrl(), ddl);
+String dml = "UPSERT INTO test_table VALUES(?, ?)";
+PreparedStatement stmt = conn.prepareStatement(dml);
+stmt.setString(1, "a");
+stmt.setInt(2, 10);
+stmt.execute();
+conn.commit();
+
+ddl = "CREATE TABLE b_table " +
+"  (a_string varchar not null, col1 integer" +
+"  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+createTestTable(getUrl(), ddl);
+dml = "UPSERT INTO b_table VALUES(?, ?)";
+stmt = conn.prepareStatement(dml);
+stmt.setString(1, "b");
+stmt.setInt(2, 20);
+stmt.execute();
+conn.commit();
+
+String query = "select * from test_table union all select * from 
b_table";
+
+try{
+PreparedStatement pstmt = conn.prepareStatement(query);
+assertTrue(pstmt.getParameterMetaData() != null);
+ResultSet rs = pstmt.executeQuery();
+assertTrue(rs.next());
+assertEquals("a",rs.getString(1));
+assertEquals(10,rs.getInt(2));
+assertTrue(rs.next());
+assertEquals("b",rs.getString(1));
+assertEquals(20,rs.getInt(2));
+assertFalse(rs.next()); 
+} catch (Exception ex) {
+ex.printStackTrace();
+} finally {
+conn.close();
+}
+} 
+
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0c21539c/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
index 70bb815..9e756c8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
@@ -182,7 +182,8 @@ public class QueryCompiler {
 StatementContext context = new StatementContext(statement, resolver, 
scan, sequenceManager);
 
 QueryPlan plan = compileSingleFlatQuery(context, select, 
statement.getParameters(), false, false, null, null, false);
-plan =  new UnionPlan(context, select, tableRef, plan.getProjector(), 
plan.getLimit(), plan.getOrderBy(), GroupBy.EMPTY_GROUP_BY, plans, null); 
+plan =  new UnionPlan(context, select, tableRef, plan.getProjector(), 
plan.getLimit(), plan.getOrderBy(), GroupBy.EMPTY_GROUP_BY, 
+plans, context.getBindManager().getParameterMetaData()); 
 return plan;
 }
 



[30/50] [abbrv] phoenix git commit: PHOENIX-2683 store rowCount and byteCount at guidePost level(Ankit Singhal)

2016-02-24 Thread maryannxue
PHOENIX-2683 store rowCount and byteCount at guidePost level(Ankit Singhal)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d2fcd21d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d2fcd21d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d2fcd21d

Branch: refs/heads/calcite
Commit: d2fcd21d4f55c17585adbbc41f72027323a8a870
Parents: e797b36
Author: Ankit Singhal 
Authored: Tue Feb 16 22:03:58 2016 +0530
Committer: Ankit Singhal 
Committed: Tue Feb 16 22:03:58 2016 +0530

--
 .../StatsCollectorWithSplitsAndMultiCFIT.java   |  66 +++-
 .../generated/PGuidePostsProtos.java| 336 ++-
 .../org/apache/phoenix/execute/ScanPlan.java|   5 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |  25 +-
 .../phoenix/schema/stats/GuidePostsInfo.java|  69 ++--
 .../schema/stats/GuidePostsInfoBuilder.java |  61 ++--
 .../schema/stats/StatisticsCollector.java   |  21 +-
 .../phoenix/schema/stats/StatisticsWriter.java  |  18 +-
 phoenix-protocol/src/main/PGuidePosts.proto |   2 +
 9 files changed, 502 insertions(+), 101 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d2fcd21d/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
index dfe8b60..13cd54c 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
@@ -26,9 +26,11 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.Random;
 
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -102,10 +104,10 @@ public class StatsCollectorWithSplitsAndMultiCFIT extends 
StatsCollectorAbstract
 
 TestUtil.analyzeTable(conn, STATS_TEST_TABLE_NAME_NEW);
 String query = "UPDATE STATISTICS " + STATS_TEST_TABLE_NAME_NEW + " 
SET \""
-+ QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + 
Long.toString(2000);
++ QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + 
Long.toString(250);
 conn.createStatement().execute(query);
 keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME_NEW);
-assertEquals(6, keyRanges.size());
+assertEquals(26, keyRanges.size());
 
 rs = conn.createStatement().executeQuery(
 "SELECT 
COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH),COUNT(*) from 
SYSTEM.STATS where PHYSICAL_NAME = '"
@@ -115,27 +117,71 @@ public class StatsCollectorWithSplitsAndMultiCFIT extends 
StatsCollectorAbstract
 assertTrue(rs.next());
 assertEquals("A", rs.getString(1));
 assertEquals(25, rs.getInt(2));
-assertEquals(11040, rs.getInt(3));
-assertEquals(5, rs.getInt(4));
+assertEquals(12420, rs.getInt(3));
+assertEquals(25, rs.getInt(4));
 
 assertTrue(rs.next());
 assertEquals("B", rs.getString(1));
 assertEquals(20, rs.getInt(2));
-assertEquals(4432, rs.getInt(3));
-assertEquals(2, rs.getInt(4));
+assertEquals(5540, rs.getInt(3));
+assertEquals(20, rs.getInt(4));
 
 assertTrue(rs.next());
 assertEquals("C", rs.getString(1));
 assertEquals(25, rs.getInt(2));
-assertEquals(6652, rs.getInt(3));
-assertEquals(3, rs.getInt(4));
+assertEquals(6930, rs.getInt(3));
+assertEquals(25, rs.getInt(4));
 
 assertTrue(rs.next());
 assertEquals("D", rs.getString(1));
 assertEquals(25, rs.getInt(2));
-assertEquals(6652, rs.getInt(3));
-assertEquals(3, rs.getInt(4));
+assertEquals(6930, rs.getInt(3));
+assertEquals(25, rs.getInt(4));
 
 }
 
+@Test
+public void testRowCountAndByteCounts() throws SQLException {
+Connection conn;
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+conn = DriverManager.getConnection(getUrl(), props);
+String tableName = "T";
+String ddl = "CREATE TABLE " + tableName + " (t_id VARCHAR NOT 
NULL,\n" + "k1 INTEGER NOT NULL,\n"
++ "k2 INTEGER NOT NULL,\n" + "C3.k3 INTEGER,\n" + "C2.v1 
VARCHAR,\n"
++ "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2)) spl

[42/50] [abbrv] phoenix git commit: PHOENIX-2692 Config setting for disabling stats

2016-02-24 Thread maryannxue
PHOENIX-2692 Config setting for disabling stats

Add configuration setting to allow disabling stats collection, for
environments where it is not desired or is causing issues.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c2cc1be6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c2cc1be6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c2cc1be6

Branch: refs/heads/calcite
Commit: c2cc1be60492844779ab713d5cd84d37a17e6651
Parents: 28a8b80
Author: Gabriel Reid 
Authored: Thu Feb 18 10:20:36 2016 +0100
Committer: Gabriel Reid 
Committed: Fri Feb 19 15:22:53 2016 +0100

--
 .../end2end/StatsCollectionDisabledIT.java  |  70 ++
 .../UngroupedAggregateRegionObserver.java   |  12 +-
 .../org/apache/phoenix/query/QueryServices.java |   1 +
 .../stats/DefaultStatisticsCollector.java   | 223 +++
 .../schema/stats/NoOpStatisticsCollector.java   |  72 ++
 .../phoenix/schema/stats/PTableStats.java   |   2 +-
 .../schema/stats/StatisticsCollector.java   | 213 +++---
 .../stats/StatisticsCollectorFactory.java   |  63 ++
 .../phoenix/schema/stats/StatisticsScanner.java |   2 +-
 .../phoenix/schema/stats/StatisticsWriter.java  |   6 +-
 10 files changed, 471 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2cc1be6/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectionDisabledIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectionDisabledIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectionDisabledIT.java
new file mode 100644
index 000..a92a665
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectionDisabledIT.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Map;
+import java.util.Properties;
+
+import com.google.common.collect.Maps;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertFalse;
+
+/**
+ * Verifies that statistics are not collected if they are disabled via a 
setting
+ */
+public class StatsCollectionDisabledIT extends StatsCollectorAbstractIT {
+
+@BeforeClass
+public static void doSetup() throws Exception {
+Map props = Maps.newHashMapWithExpectedSize(3);
+// Must update config before starting server
+props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Long.toString(20));
+props.put(QueryServices.STATS_ENABLED_ATTRIB, Boolean.toString(false));
+setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+}
+
+@Test
+public void testStatisticsAreNotWritten() throws SQLException {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE T1 (ID INTEGER NOT NULL PRIMARY KEY, NAME 
VARCHAR)");
+stmt.execute("UPSERT INTO T1 VALUES (1, 'NAME1')");
+stmt.execute("UPSERT INTO T1 VALUES (2, 'NAME2')");
+stmt.execute("UPSERT INTO T1 VALUES (3, 'NAME3')");
+conn.commit();
+stmt.execute("UPDATE STATISTICS T1");
+ResultSet rs = stmt.executeQuery("SELECT * FROM SYSTEM.STATS");
+assertFalse(rs.next());
+rs.close();
+stmt.close();
+conn.close();
+}
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2cc1be6/phoenix-core/src/main/java/org/

[22/50] [abbrv] phoenix git commit: PHOENIX-2667 Race condition between IndexBuilder and Split for region lock

2016-02-24 Thread maryannxue
PHOENIX-2667 Race condition between IndexBuilder and Split for region lock


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cdaca287
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cdaca287
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cdaca287

Branch: refs/heads/calcite
Commit: cdaca287cd50fbdd25a9b11d8af6fb0a3b3956cc
Parents: 04c3819
Author: James Taylor 
Authored: Sat Feb 13 15:49:31 2016 -0800
Committer: James Taylor 
Committed: Sat Feb 13 15:53:29 2016 -0800

--
 .../phoenix/hbase/index/builder/IndexBuildManager.java| 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cdaca287/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
index ae2125e..f411b8e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java
@@ -39,7 +39,8 @@ import 
org.apache.phoenix.hbase.index.parallel.QuickFailingTaskRunner;
 import org.apache.phoenix.hbase.index.parallel.Task;
 import org.apache.phoenix.hbase.index.parallel.TaskBatch;
 import org.apache.phoenix.hbase.index.parallel.ThreadPoolBuilder;
-import org.apache.phoenix.hbase.index.parallel.ThreadPoolManager;
+
+import com.google.common.util.concurrent.MoreExecutors;
 
 /**
  * Manage the building of index updates from primary table updates.
@@ -77,10 +78,11 @@ public class IndexBuildManager implements Stoppable {
* @throws IOException if an {@link IndexBuilder} cannot be correctly steup
*/
   public IndexBuildManager(RegionCoprocessorEnvironment env) throws 
IOException {
-this(getIndexBuilder(env), new 
QuickFailingTaskRunner(ThreadPoolManager.getExecutor(
-  getPoolBuilder(env), env)));
+// Prevent deadlock by using single thread for all reads so that we know
+// we can get the ReentrantRWLock. See PHOENIX-2671 for more details.
+this(getIndexBuilder(env), new 
QuickFailingTaskRunner(MoreExecutors.sameThreadExecutor()));
   }
-
+  
   private static IndexBuilder getIndexBuilder(RegionCoprocessorEnvironment e) 
throws IOException {
 Configuration conf = e.getConfiguration();
 Class builderClass =



[29/50] [abbrv] phoenix git commit: PHOENIX-1973 Improve CsvBulkLoadTool performance by moving keyvalue construction from map phase to reduce phase(Sergey Soldatov)

2016-02-24 Thread maryannxue
PHOENIX-1973 Improve CsvBulkLoadTool performance by moving keyvalue 
construction from map phase to reduce phase(Sergey Soldatov)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e797b36c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e797b36c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e797b36c

Branch: refs/heads/calcite
Commit: e797b36c2ce42e9b9fd6b37fd8b9f79f79d6f18f
Parents: 60ef7cd
Author: Rajeshbabu Chintaguntla 
Authored: Tue Feb 16 12:12:23 2016 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Feb 16 12:12:23 2016 +0530

--
 .../phoenix/mapreduce/AbstractBulkLoadTool.java |   6 +-
 .../mapreduce/FormatToKeyValueMapper.java   | 164 ---
 .../mapreduce/FormatToKeyValueReducer.java  | 127 --
 .../bulkload/TargetTableRefFunctions.java   |  22 ++-
 4 files changed, 281 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e797b36c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index 39ee4b1..ab2848f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
@@ -268,7 +269,7 @@ public abstract class AbstractBulkLoadTool extends 
Configured implements Tool {
 
 job.setInputFormatClass(TextInputFormat.class);
 job.setMapOutputKeyClass(TableRowkeyPair.class);
-job.setMapOutputValueClass(KeyValue.class);
+job.setMapOutputValueClass(ImmutableBytesWritable.class);
 job.setOutputKeyClass(TableRowkeyPair.class);
 job.setOutputValueClass(KeyValue.class);
 job.setReducerClass(FormatToKeyValueReducer.class);
@@ -276,7 +277,10 @@ public abstract class AbstractBulkLoadTool extends 
Configured implements Tool {
 MultiHfileOutputFormat.configureIncrementalLoad(job, tablesToBeLoaded);
 
 final String tableNamesAsJson = 
TargetTableRefFunctions.NAMES_TO_JSON.apply(tablesToBeLoaded);
+final String logicalNamesAsJson = 
TargetTableRefFunctions.LOGICAN_NAMES_TO_JSON.apply(tablesToBeLoaded);
+
 
job.getConfiguration().set(FormatToKeyValueMapper.TABLE_NAMES_CONFKEY,tableNamesAsJson);
+
job.getConfiguration().set(FormatToKeyValueMapper.LOGICAL_NAMES_CONFKEY,logicalNamesAsJson);
 
 // give subclasses their hook
 setupJob(job);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e797b36c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
index 7e115e5..95b099e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueMapper.java
@@ -17,30 +17,30 @@
  */
 package org.apache.phoenix.mapreduce;
 
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.sql.SQLException;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
+import java.util.*;
 import javax.annotation.Nullable;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair;
 import org.apache.phoenix.mapreduce.bulkload.TargetTableRefFunctions;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
-import org.apache.phoenix.util.ColumnInfo;
-import org.apache.

[15/50] [abbrv] phoenix git commit: PHOENIX-2681 Avoid usage of HashSet in guideposts selection

2016-02-24 Thread maryannxue
PHOENIX-2681 Avoid usage of HashSet in guideposts selection


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/18f7a694
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/18f7a694
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/18f7a694

Branch: refs/heads/calcite
Commit: 18f7a69452eec7fd5fde38953510600c4a060151
Parents: decbfe3
Author: James Taylor 
Authored: Thu Feb 11 20:09:16 2016 -0800
Committer: James Taylor 
Committed: Thu Feb 11 20:14:31 2016 -0800

--
 .../phoenix/end2end/MultiCfQueryExecIT.java | 51 
 .../phoenix/iterate/BaseResultIterators.java| 35 ++
 2 files changed, 66 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/18f7a694/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
index f5566ce..2b14fe9 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
@@ -52,6 +52,7 @@ public class MultiCfQueryExecIT extends 
BaseOwnClusterClientManagedTimeIT {
 Map props = Maps.newHashMapWithExpectedSize(3);
 // Must update config before starting server
 props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Long.toString(20));
+props.put(QueryServices.QUEUE_SIZE_ATTRIB, Long.toString(200));
 setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
 }
 
@@ -184,6 +185,56 @@ public class MultiCfQueryExecIT extends 
BaseOwnClusterClientManagedTimeIT {
 }
 
 @Test
+public void testGuidePostsForMultiCFsOverUnevenDistrib() throws Exception {
+long ts = nextTimestamp();
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 10));
+Connection conn = DriverManager.getConnection(getUrl(), props);
+
+conn.createStatement().execute("CREATE TABLE T_6CF (K1 CHAR(1) NOT 
NULL, "
++ "K2 VARCHAR NOT NULL, "
++ "CF1.A INTEGER, "
++ "CF2.B INTEGER, "
++ "CF3.C INTEGER, "
++ "CF4.D INTEGER, "
++ "CF5.E INTEGER, "
++ "CF6.F INTEGER "
++ "CONSTRAINT PK PRIMARY KEY (K1,K2)) SPLIT ON ('B','C','D')");
+
+conn.close();
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 20));
+conn = DriverManager.getConnection(getUrl(), props);
+for (int i = 0; i < 100; i++) {
+String upsert = "UPSERT INTO T_6CF(K1,K2,A) VALUES('" + 
Character.toString((char)('A'+i%10)) + "','" + (i*10) + "'," + i + ")";
+conn.createStatement().execute(upsert);
+if (i % 10 == 0) {
+conn.createStatement().execute("UPSERT INTO T_6CF(K1,K2,F) 
VALUES('" + Character.toString((char)('A'+i%10)) + "','" + (i*10) + "'," + (i * 
10) + ")");
+}
+}
+conn.commit();
+conn.close();
+
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 40));
+conn = DriverManager.getConnection(getUrl(), props);
+try {
+analyzeTable(getUrl(), ts + 30, "T_6CF");
+PreparedStatement statement = conn.prepareStatement("select 
count(*) from T_6CF where f < 400");
+ResultSet rs = statement.executeQuery();
+assertTrue(rs.next());
+assertEquals(4, rs.getLong(1));
+assertFalse(rs.next());
+List splits = getAllSplits(conn, "T_6CF", "f < 400", 
"COUNT(*)");
+// Uses less populated column f
+assertEquals(14, splits.size());
+// Uses more populated column a
+splits = getAllSplits(conn, "T_6CF", "a < 80", "COUNT(*)");
+assertEquals(104, splits.size());
+} finally {
+conn.close();
+}
+}
+
+@Test
 public void testGuidePostsRetrievedForMultiCF() throws Exception {
   Connection conn;
   PreparedStatement stmt;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/18f7a694/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 3a3d1f2..2352e94 100644
--- 
a/phoen

[11/50] [abbrv] phoenix git commit: PHOENIX-2656 Shield Phoenix from Tephra repackaging

2016-02-24 Thread maryannxue
PHOENIX-2656 Shield Phoenix from Tephra repackaging


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d5518f02
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d5518f02
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d5518f02

Branch: refs/heads/calcite
Commit: d5518f02d85e2cd92955377fc3934a266eaa1fa6
Parents: 1c3a86d
Author: Thomas D'Silva 
Authored: Mon Feb 8 14:22:25 2016 -0800
Committer: Thomas D'Silva 
Committed: Mon Feb 8 18:02:25 2016 -0800

--
 .../phoenix/end2end/AlterTableWithViewsIT.java  |   7 +-
 .../org/apache/phoenix/tx/TransactionIT.java|  12 +-
 .../coprocessor/DelegateRegionObserver.java | 562 +++
 .../PhoenixTransactionalProcessor.java  |  28 +
 .../query/ConnectionQueryServicesImpl.java  |  12 +-
 5 files changed, 605 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5518f02/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
index e3d78ea..f1816cc 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.coprocessor.PhoenixTransactionalProcessor;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -58,8 +59,6 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import co.cask.tephra.hbase11.coprocessor.TransactionProcessor;
-
 import com.google.common.base.Objects;
 import com.google.common.collect.Maps;
 
@@ -1126,7 +1125,7 @@ public class AlterTableWithViewsIT extends 
BaseHBaseManagedTimeIT {
 assertTableDefinition(conn, "VIEWOFTABLE", PTableType.VIEW, 
"TABLEWITHVIEW", 0, 5, 3, "ID", "COL1", "COL2", "VIEW_COL1", "VIEW_COL2");
 
 HTableInterface htable = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("TABLEWITHVIEW"));
-
assertFalse(htable.getTableDescriptor().getCoprocessors().contains(TransactionProcessor.class.getName()));
+
assertFalse(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
 assertFalse(conn.unwrap(PhoenixConnection.class).getTable(new 
PTableKey(null, "TABLEWITHVIEW")).isTransactional());
 assertFalse(conn.unwrap(PhoenixConnection.class).getTable(new 
PTableKey(null, "VIEWOFTABLE")).isTransactional());
 
@@ -1135,7 +1134,7 @@ public class AlterTableWithViewsIT extends 
BaseHBaseManagedTimeIT {
 // query the view to force the table cache to be updated
 conn.createStatement().execute("SELECT * FROM VIEWOFTABLE");
 htable = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("TABLEWITHVIEW"));
-
assertTrue(htable.getTableDescriptor().getCoprocessors().contains(TransactionProcessor.class.getName()));
+
assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
 assertTrue(conn.unwrap(PhoenixConnection.class).getTable(new 
PTableKey(null, "TABLEWITHVIEW")).isTransactional());
 assertTrue(conn.unwrap(PhoenixConnection.class).getTable(new 
PTableKey(null, "VIEWOFTABLE")).isTransactional());
 } 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5518f02/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index 1bf313b..1fd9828 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.PhoenixTransactionalProcessor;
 import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
 i

[39/50] [abbrv] phoenix git commit: PHOENIX-2689 VARCHAR Field Not Working With String Concatenation

2016-02-24 Thread maryannxue
PHOENIX-2689 VARCHAR Field Not Working With String Concatenation


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/45a9d670
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/45a9d670
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/45a9d670

Branch: refs/heads/calcite
Commit: 45a9d670bbb5e659fb967cfdbc6fc1ced43fba12
Parents: 7d90e88
Author: James Taylor 
Authored: Thu Feb 18 12:16:27 2016 -0800
Committer: James Taylor 
Committed: Thu Feb 18 12:16:27 2016 -0800

--
 .../apache/phoenix/end2end/LpadFunctionIT.java  | 242 --
 .../org/apache/phoenix/end2end/StringIT.java| 254 +++
 .../expression/StringConcatExpression.java  |  21 +-
 3 files changed, 271 insertions(+), 246 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/45a9d670/phoenix-core/src/it/java/org/apache/phoenix/end2end/LpadFunctionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LpadFunctionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LpadFunctionIT.java
deleted file mode 100644
index 4070103..000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LpadFunctionIT.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.Test;
-
-import com.google.common.collect.Lists;
-
-/**
- * Tests for the LPAD built-in function.
- */
-
-public class LpadFunctionIT extends BaseHBaseManagedTimeIT {
-
-/**
- * Helper to test LPAD function
- * 
- * @param conn
- *connection to be used
- * @param colName
- *name of column to query
- * @param length
- *length of the output string
- * @param fillString
- *fill characters to be used while prepending
- * @param sortOrder
- *sort order of the pk column
- * @param expectedOutput
- *expected output of LPAD function
- */
-private void testLpadHelper(Connection conn, String colName, int length, 
List fillStringList,
-List expectedOutputList, String sortOrder) throws Exception {
-assertEquals("fillStringList and expectedOutputList should be of equal 
size", fillStringList.size(),
-expectedOutputList.size());
-for (int id = 0; id < fillStringList.size(); ++id) {
-String fillString = fillStringList.get(id);
-String lPadExpr = fillString != null ? "LPAD(%s,?,?)" : 
"LPAD(%s,?)";
-String sql = String.format("SELECT " + lPadExpr + " FROM 
TEST_TABLE_%s WHERE id=?", colName, sortOrder);
-PreparedStatement stmt = conn.prepareStatement(sql);
-int index = 1;
-stmt.setInt(index++, length);
-if (fillString != null)
-stmt.setString(index++, fillString);
-stmt.setInt(index++, id);
-
-ResultSet rs = stmt.executeQuery();
-assertTrue("Expected exactly one row to be returned ", rs.next());
-assertEquals("LPAD returned incorrect result ", 
expectedOutputList.get(id), rs.getString(1));
-assertFalse("Expected exactly one row to be returned ", rs.next());
-}
-}
-
-/**
- * Helper to test LPAD function
- * 
- * @param conn
- *connection to phoenix
- * @param inputList
- * 

[44/50] [abbrv] phoenix git commit: PHOENIX-2670 Guava version incompatibility of Twill vs HBase when launching M/R index

2016-02-24 Thread maryannxue
PHOENIX-2670 Guava version incompatibility of Twill vs HBase when launching M/R 
index


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/61fa462b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/61fa462b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/61fa462b

Branch: refs/heads/calcite
Commit: 61fa462b433296c29b1046e035f811ae159eaa85
Parents: 9dd6bab
Author: James Taylor 
Authored: Fri Feb 19 14:33:27 2016 -0800
Committer: James Taylor 
Committed: Fri Feb 19 14:34:21 2016 -0800

--
 .../phoenix/query/ConnectionQueryServicesImpl.java| 14 --
 pom.xml   |  2 +-
 2 files changed, 9 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/61fa462b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 9a385b2..d55ab30 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -193,6 +193,7 @@ import com.google.common.base.Joiner;
 import com.google.common.base.Throwables;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
+import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
@@ -204,6 +205,7 @@ import co.cask.tephra.TransactionSystemClient;
 import co.cask.tephra.TxConstants;
 import co.cask.tephra.distributed.PooledClientProvider;
 import co.cask.tephra.distributed.TransactionServiceClient;
+import co.cask.tephra.zookeeper.TephraZKClientService;
 
 
 public class ConnectionQueryServicesImpl extends DelegateQueryServices 
implements ConnectionQueryServices {
@@ -350,14 +352,14 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 if (zkQuorumServersString==null) {
 zkQuorumServersString = 
connectionInfo.getZookeeperQuorum()+":"+connectionInfo.getPort();
 }
+
+int timeOut = props.getInt(HConstants.ZK_SESSION_TIMEOUT, 
HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
+// Create instance of the tephra zookeeper client 
+ZKClientService tephraZKClientService = new 
TephraZKClientService(zkQuorumServersString, timeOut, null, 
ArrayListMultimap.create());
+
 ZKClientService zkClientService = ZKClientServices.delegate(
   ZKClients.reWatchOnExpire(
-ZKClients.retryOnFailure(
-  ZKClientService.Builder.of(zkQuorumServersString)
-
.setSessionTimeout(props.getInt(HConstants.ZK_SESSION_TIMEOUT, 
HConstants.DEFAULT_ZK_SESSION_TIMEOUT))
-.build(),
-  RetryStrategies.exponentialDelay(500, 2000, 
TimeUnit.MILLISECONDS)
-)
+ ZKClients.retryOnFailure(tephraZKClientService, 
RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))
   )
 );
 zkClientService.startAndWait();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/61fa462b/pom.xml
--
diff --git a/pom.xml b/pom.xml
index fe79577..b27c3b9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -112,7 +112,7 @@
 2.1.2
 1.6.0
 8.1.7.v20120910
-0.6.5
+0.7.0
 1.5.2
 2.10.4
 2.10



[14/50] [abbrv] phoenix git commit: PHOENIX-2334 CSV Bulk load fails on local indexes(Rajeshbabu)

2016-02-24 Thread maryannxue
PHOENIX-2334 CSV Bulk load fails on local indexes(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/decbfe30
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/decbfe30
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/decbfe30

Branch: refs/heads/calcite
Commit: decbfe3062bbc970050e03fbb198e61a2d30e88c
Parents: c48fee0
Author: Rajeshbabu Chintaguntla 
Authored: Thu Feb 11 02:48:05 2016 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Thu Feb 11 02:48:05 2016 +0530

--
 .../phoenix/end2end/CsvBulkLoadToolIT.java  | 27 +++-
 .../phoenix/mapreduce/AbstractBulkLoadTool.java | 16 
 .../mapreduce/bulkload/TargetTableRef.java  |  2 +-
 .../phoenix/query/ConnectionQueryServices.java  |  1 +
 .../query/ConnectionQueryServicesImpl.java  | 27 
 .../query/ConnectionlessQueryServicesImpl.java  | 15 +++
 .../query/DelegateConnectionQueryServices.java  |  6 +
 .../java/org/apache/phoenix/util/IndexUtil.java | 10 +++-
 8 files changed, 85 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/decbfe30/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
index 26ec889..96042c5 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
@@ -218,7 +218,7 @@ public class CsvBulkLoadToolIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 
 Statement stmt = conn.createStatement();
 stmt.execute("CREATE TABLE TABLE6 (ID INTEGER NOT NULL PRIMARY KEY, " +
-"FIRST_NAME VARCHAR, LAST_NAME VARCHAR)");
+"FIRST_NAME VARCHAR, LAST_NAME VARCHAR) SPLIt ON (1,2)");
 String ddl = "CREATE LOCAL INDEX TABLE6_IDX ON TABLE6 "
 + " (FIRST_NAME ASC)";
 stmt.execute(ddl);
@@ -234,16 +234,19 @@ public class CsvBulkLoadToolIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 
 CsvBulkLoadTool csvBulkLoadTool = new CsvBulkLoadTool();
 csvBulkLoadTool.setConf(getUtility().getConfiguration());
-try {
-csvBulkLoadTool.run(new String[] {
-"--input", "/tmp/input3.csv",
-"--table", "table6",
-"--zookeeper", zkQuorum});
-fail("Csv bulk load currently has issues with local indexes.");
-} catch( UnsupportedOperationException ise) {
-assertEquals("Local indexes not supported by Bulk 
Loader",ise.getMessage());
-}
-
+int exitCode = csvBulkLoadTool.run(new String[] {
+"--input", "/tmp/input3.csv",
+"--table", "table6",
+"--zookeeper", zkQuorum});
+assertEquals(0, exitCode);
+
+ResultSet rs = stmt.executeQuery("SELECT id, FIRST_NAME FROM TABLE6 
where first_name='FirstName 2'");
+assertTrue(rs.next());
+assertEquals(2, rs.getInt(1));
+assertEquals("FirstName 2", rs.getString(2));
+
+rs.close();
+stmt.close();
 }
 
 @Test
@@ -251,7 +254,7 @@ public class CsvBulkLoadToolIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 testImportOneIndexTable("TABLE4", false);
 }
 
-//@Test
+@Test
 public void testImportOneLocalIndexTable() throws Exception {
 testImportOneIndexTable("TABLE5", true);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/decbfe30/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index f6ba5f6..39ee4b1 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -21,8 +21,10 @@ import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.UUID;
 
 import org.apache.commons.cli.CommandLine;
@@ -54,6 +56,7 @@ import 
org.apache.phoenix.mapreduce.bulkload.TargetTableRefFunctions;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.util.ColumnInfo;

[31/50] [abbrv] phoenix git commit: PHOENIX-2635 Partial index rebuild doesn't work for mutable data

2016-02-24 Thread maryannxue
PHOENIX-2635 Partial index rebuild doesn't work for mutable data


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4ccce0ed
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4ccce0ed
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4ccce0ed

Branch: refs/heads/calcite
Commit: 4ccce0ed103db04667da95ab515eda76029dacdb
Parents: d2fcd21
Author: James Taylor 
Authored: Tue Feb 16 09:00:24 2016 -0800
Committer: James Taylor 
Committed: Tue Feb 16 09:03:10 2016 -0800

--
 .../apache/phoenix/coprocessor/MetaDataRegionObserver.java  | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4ccce0ed/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 0cce4d7..9bcf2d0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -56,6 +56,7 @@ import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.phoenix.cache.GlobalCache;
 import org.apache.phoenix.cache.ServerCacheClient;
+import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.index.PhoenixIndexCodec;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -320,8 +321,12 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
 long timeStamp = Math.max(0, earliestDisableTimestamp - 
overlapTime);
 
 LOG.info("Starting to build indexes=" + 
indexesToPartiallyRebuild + " from timestamp=" + timeStamp);
-Scan dataTableScan = new Scan();
-dataTableScan.setRaw(true);
+new Scan();
+List maintainers = 
Lists.newArrayListWithExpectedSize(indexesToPartiallyRebuild.size());
+for (PTable index : indexesToPartiallyRebuild) {
+maintainers.add(index.getIndexMaintainer(dataPTable, 
conn));
+}
+Scan dataTableScan = 
IndexManagementUtil.newLocalStateScan(maintainers);
 dataTableScan.setTimeRange(timeStamp, 
HConstants.LATEST_TIMESTAMP);
 byte[] physicalTableName = 
dataPTable.getPhysicalName().getBytes();
 try (HTableInterface dataHTable = 
conn.getQueryServices().getTable(physicalTableName)) {



[48/50] [abbrv] phoenix git commit: Sync with master branch

2016-02-24 Thread maryannxue
Sync with master branch


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/58ec2579
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/58ec2579
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/58ec2579

Branch: refs/heads/calcite
Commit: 58ec2579cd5883bf19b1d24cd3dd9342fec339c8
Parents: 69a2f24 e4acd0c
Author: maryannxue 
Authored: Tue Feb 23 10:07:19 2016 -0500
Committer: maryannxue 
Committed: Tue Feb 23 10:07:19 2016 -0500

--
 bin/queryserver.py  |  14 +-
 .../phoenix/end2end/AlterTableWithViewsIT.java  |   7 +-
 .../end2end/ClientTimeArithmeticQueryIT.java|  11 +-
 .../phoenix/end2end/CompareDecimalToLongIT.java | 241 
 .../phoenix/end2end/ContextClassloaderIT.java   |   2 +-
 .../phoenix/end2end/CsvBulkLoadToolIT.java  | 114 ++--
 .../org/apache/phoenix/end2end/DateTimeIT.java  |   5 +-
 .../org/apache/phoenix/end2end/DeleteIT.java|   3 +-
 .../apache/phoenix/end2end/DistinctCountIT.java |   2 +-
 .../apache/phoenix/end2end/GroupByCaseIT.java   |  35 ++
 .../org/apache/phoenix/end2end/IndexToolIT.java | 273 +++--
 .../phoenix/end2end/LikeExpressionIT.java   |  20 +
 .../apache/phoenix/end2end/LpadFunctionIT.java  | 242 
 .../org/apache/phoenix/end2end/MapReduceIT.java | 230 
 .../phoenix/end2end/MultiCfQueryExecIT.java |  51 ++
 .../phoenix/end2end/MutableIndexToolIT.java | 128 +
 .../apache/phoenix/end2end/PercentileIT.java|   2 +-
 .../apache/phoenix/end2end/PrimitiveTypeIT.java | 245 
 .../phoenix/end2end/ProductMetricsIT.java   |   6 +-
 .../apache/phoenix/end2end/ReverseScanIT.java   |  21 +
 .../phoenix/end2end/RowValueConstructorIT.java  |   4 +-
 .../end2end/StatsCollectionDisabledIT.java  |  70 +++
 .../phoenix/end2end/StatsCollectorIT.java   |  28 +-
 .../StatsCollectorWithSplitsAndMultiCFIT.java   |  69 ++-
 .../org/apache/phoenix/end2end/StringIT.java| 254 +
 .../org/apache/phoenix/end2end/UnionAllIT.java  |  49 +-
 .../phoenix/end2end/UserDefinedFunctionsIT.java |   8 +-
 .../phoenix/end2end/VariableLengthPKIT.java |   2 +-
 .../end2end/index/DropIndexDuringUpsertIT.java  |   2 +-
 .../end2end/index/IndexExpressionIT.java|   7 +-
 .../apache/phoenix/end2end/index/IndexIT.java   |   5 +-
 .../end2end/index/MutableIndexFailureIT.java| 398 +
 .../index/MutableIndexReplicationIT.java|   2 +-
 .../end2end/index/ReadOnlyIndexFailureIT.java   | 284 ++
 .../salted/SaltedTableUpsertSelectIT.java   |  57 ++
 .../EndToEndCoveredColumnsIndexBuilderIT.java   |   2 +-
 .../example/EndToEndCoveredIndexingIT.java  |   5 +-
 .../org/apache/phoenix/tx/TransactionIT.java|  12 +-
 phoenix-core/src/main/antlr3/PhoenixSQL.g   |   5 +-
 .../apache/phoenix/calcite/PhoenixSchema.java   |   2 +-
 .../apache/phoenix/calcite/PhoenixTable.java|  12 +-
 .../calcite/rel/PhoenixRelImplementorImpl.java  |   2 +-
 .../phoenix/calcite/rel/PhoenixTableScan.java   |   5 +-
 .../apache/phoenix/compile/DeleteCompiler.java  |   6 +-
 .../apache/phoenix/compile/FromCompiler.java|   2 +-
 .../apache/phoenix/compile/GroupByCompiler.java |  74 ++-
 .../apache/phoenix/compile/JoinCompiler.java|   2 +-
 .../apache/phoenix/compile/QueryCompiler.java   |   3 +-
 .../org/apache/phoenix/compile/ScanRanges.java  |   2 +
 .../compile/TupleProjectionCompiler.java|   4 +-
 .../apache/phoenix/compile/UnionCompiler.java   |   2 +-
 .../apache/phoenix/compile/UpsertCompiler.java  |   7 +-
 .../coprocessor/BaseScannerRegionObserver.java  |  16 +-
 .../coprocessor/DelegateRegionObserver.java | 562 +++
 .../coprocessor/HashJoinRegionScanner.java  |  71 ++-
 .../coprocessor/MetaDataEndpointImpl.java   |  92 +--
 .../coprocessor/MetaDataRegionObserver.java | 136 -
 .../PhoenixTransactionalProcessor.java  |  28 +
 .../UngroupedAggregateRegionObserver.java   |  31 +-
 .../generated/PGuidePostsProtos.java| 336 ++-
 .../coprocessor/generated/PTableProtos.java | 103 +++-
 .../phoenix/exception/SQLExceptionCode.java |   7 +-
 .../apache/phoenix/execute/AggregatePlan.java   |  16 +-
 .../apache/phoenix/execute/MutationState.java   |  39 +-
 .../org/apache/phoenix/execute/ScanPlan.java|  19 +-
 .../phoenix/expression/InListExpression.java|   2 +-
 .../phoenix/expression/LiteralExpression.java   |   2 +-
 .../expression/ProjectedColumnExpression.java   |  11 +-
 .../expression/StringConcatExpression.java  |  21 +-
 .../expression/util/regex/JavaPattern.java  |   2 +-
 .../visitor/CloneExpressionVisitor.java |   2 +-
 .../phoenix/filter/ColumnProjectionFilter.java  |   2 +
 .../apache/phoenix/filter/SkipScanFilter.java   |  47 +-
 .../hbase/index/builder/IndexBuildManager.java  |  10 +-
 .../hbase/index/cover

[28/50] [abbrv] phoenix git commit: PHOENIX-2602 Parser does not handle escaped LPAREN

2016-02-24 Thread maryannxue
PHOENIX-2602 Parser does not handle escaped LPAREN


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/43b34da1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/43b34da1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/43b34da1

Branch: refs/heads/calcite
Commit: 43b34da1d4e10bef233bbb748c5dd1be11d7ce18
Parents: 046bda3
Author: James Taylor 
Authored: Mon Feb 15 01:44:31 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 10:14:58 2016 -0800

--
 phoenix-core/src/main/antlr3/PhoenixSQL.g | 7 ---
 .../test/java/org/apache/phoenix/parse/QueryParserTest.java   | 6 ++
 2 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/43b34da1/phoenix-core/src/main/antlr3/PhoenixSQL.g
--
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g 
b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 0be5717..64e1d32 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -1213,14 +1213,14 @@ DIGIT
 STRING_LITERAL
 @init{ StringBuilder sb = new StringBuilder(); }
 :   '\''
-( t=CHAR_ESC { sb.append(getText()); }
-| t=CHAR { sb.append(t.getText()); }
+( t=CHAR { sb.append(t.getText()); } 
+| t=CHAR_ESC { sb.append(getText()); }
 )* '\'' { setText(sb.toString()); }
 ;
 
 fragment
 CHAR
-:   ( ~('\'') )
+:   ( ~('\'' | '\\') )
 ;
 
 fragment
@@ -1242,6 +1242,7 @@ CHAR_ESC
 | '\\'  { setText("\\"); }
 | '_'   { setText("\\_"); }
 | '%'   { setText("\\\%"); }
+|   { setText("\\"); }
 )
 |   '\'\''  { setText("\'"); }
 ;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/43b34da1/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
index 5363042..70f590f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
@@ -766,4 +766,10 @@ public class QueryParserTest {
 String sql = "select * from t where 'a' <= ALL(a-b+1)";
 parseQuery(sql);
 }
+
+@Test
+public void testDoubleBackslash() throws Exception {
+String sql = "SELECT * FROM T WHERE A LIKE 'a\\(d'";
+parseQuery(sql);
+}
 }



[49/50] [abbrv] phoenix git commit: Revert "PHOENIX-2678 Replace calcite default RelOptCostFactory"

2016-02-24 Thread maryannxue
Revert "PHOENIX-2678 Replace calcite default RelOptCostFactory"

This reverts commit 69a2f2437bf0d6513d3f6c46ec34094279f0f411.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bc4b8917
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bc4b8917
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bc4b8917

Branch: refs/heads/calcite
Commit: bc4b89172180e963267b1588c3f31a3d94491777
Parents: 58ec257
Author: maryannxue 
Authored: Tue Feb 23 10:28:45 2016 -0500
Committer: maryannxue 
Committed: Tue Feb 23 10:28:45 2016 -0500

--
 .../org/apache/phoenix/calcite/CalciteIT.java   |   8 +-
 .../apache/phoenix/calcite/CalciteIndexIT.java  |   2 +-
 .../calcite/jdbc/PhoenixPrepareImpl.java|   2 -
 .../phoenix/calcite/plan/PhoenixCost.java   | 227 ---
 .../calcite/rel/PhoenixAbstractAggregate.java   |   2 +-
 .../calcite/rel/PhoenixAbstractSort.java|   2 +-
 .../calcite/rel/PhoenixClientAggregate.java |   3 +-
 .../phoenix/calcite/rel/PhoenixClientJoin.java  |   4 +-
 .../calcite/rel/PhoenixClientProject.java   |   3 +-
 .../calcite/rel/PhoenixClientSemiJoin.java  |   4 +-
 .../phoenix/calcite/rel/PhoenixClientSort.java  |   3 +-
 .../calcite/rel/PhoenixCompactClientSort.java   |   3 +-
 .../phoenix/calcite/rel/PhoenixCorrelate.java   |  21 +-
 .../phoenix/calcite/rel/PhoenixFilter.java  |   4 +-
 .../phoenix/calcite/rel/PhoenixLimit.java   |   4 +-
 .../calcite/rel/PhoenixMergeSortUnion.java  |   4 +-
 .../apache/phoenix/calcite/rel/PhoenixRel.java  |   7 +
 .../calcite/rel/PhoenixServerAggregate.java |   3 +-
 .../phoenix/calcite/rel/PhoenixServerJoin.java  |   4 +-
 .../calcite/rel/PhoenixServerProject.java   |   3 +-
 .../calcite/rel/PhoenixServerSemiJoin.java  |   4 +-
 .../phoenix/calcite/rel/PhoenixServerSort.java  |   3 +-
 .../phoenix/calcite/rel/PhoenixTableScan.java   |   6 +-
 .../phoenix/calcite/rel/PhoenixUncollect.java   |  15 +-
 .../phoenix/calcite/rel/PhoenixUnion.java   |   4 +-
 .../phoenix/calcite/rel/PhoenixValues.java  |   5 +-
 .../calcite/rules/PhoenixConverterRules.java|   3 +-
 27 files changed, 60 insertions(+), 293 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc4b8917/phoenix-core/src/it/java/org/apache/phoenix/calcite/CalciteIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/calcite/CalciteIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/calcite/CalciteIT.java
index 820c2d4..df6ac81 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/calcite/CalciteIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/calcite/CalciteIT.java
@@ -293,17 +293,17 @@ public class CalciteIT extends BaseCalciteIT {
 
 start(false, 1000f).sql("select t1.entity_id, t2.a_string, 
t3.organization_id from aTable t1 join aTable t2 on t1.entity_id = t2.entity_id 
and t1.organization_id = t2.organization_id join atable t3 on t1.entity_id = 
t3.entity_id and t1.organization_id = t3.organization_id")
 .explainIs("PhoenixToEnumerableConverter\n" +
-   "  PhoenixClientProject(ENTITY_ID=[$1], 
A_STRING=[$4], ORGANIZATION_ID=[$5])\n" +
-   "PhoenixClientJoin(condition=[AND(=($1, $6), 
=($0, $5))], joinType=[inner])\n" +
+   "  PhoenixClientProject(ENTITY_ID=[$1], 
A_STRING=[$6], ORGANIZATION_ID=[$2])\n" +
+   "PhoenixClientJoin(condition=[AND(=($1, $5), 
=($0, $4))], joinType=[inner])\n" +
"  PhoenixClientJoin(condition=[AND(=($1, $3), 
=($0, $2))], joinType=[inner])\n" +
"PhoenixServerSort(sort0=[$1], sort1=[$0], 
dir0=[ASC], dir1=[ASC])\n" +
"  
PhoenixServerProject(ORGANIZATION_ID=[$0], ENTITY_ID=[$1])\n" +
"PhoenixTableScan(table=[[phoenix, 
ATABLE]])\n" +
"PhoenixServerSort(sort0=[$1], sort1=[$0], 
dir0=[ASC], dir1=[ASC])\n" +
-   "  
PhoenixServerProject(ORGANIZATION_ID=[$0], ENTITY_ID=[$1], A_STRING=[$2])\n" +
+   "  
PhoenixServerProject(ORGANIZATION_ID=[$0], ENTITY_ID=[$1])\n" +
"PhoenixTableScan(table=[[phoenix, 
ATABLE]])\n" +
"  PhoenixServerSort(sort0=[$1], sort1=[$0], 
dir0=[ASC], dir1=[ASC])\n" +
-   "PhoenixServerProject(ORGANIZATION_ID=[$0], 
ENTITY_ID=[$1])\n" +
+   "PhoenixServerProject(ORGANIZATION_ID=[$0], 
ENTITY_ID=[$1], A_STRING=[$2])\n" +
"  

[23/50] [abbrv] phoenix git commit: PHOENIX-2671 System.STATS table getting truncated every time on new client connection (Ankit Singhal)

2016-02-24 Thread maryannxue
PHOENIX-2671 System.STATS table getting truncated every time on new client 
connection (Ankit Singhal)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6881aef0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6881aef0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6881aef0

Branch: refs/heads/calcite
Commit: 6881aef0cfaae8643303f0612a2a4b997b8a5138
Parents: cdaca28
Author: Ankit Singhal 
Authored: Mon Feb 15 12:04:20 2016 +0530
Committer: Ankit Singhal 
Committed: Mon Feb 15 12:04:20 2016 +0530

--
 .../query/ConnectionQueryServicesImpl.java  | 48 ++--
 .../org/apache/phoenix/util/UpgradeUtil.java| 16 +++
 2 files changed, 22 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6881aef0/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 897c207..9a385b2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -188,11 +188,6 @@ import org.apache.twill.zookeeper.ZKClients;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import co.cask.tephra.TransactionSystemClient;
-import co.cask.tephra.TxConstants;
-import co.cask.tephra.distributed.PooledClientProvider;
-import co.cask.tephra.distributed.TransactionServiceClient;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Throwables;
@@ -205,6 +200,11 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
+import co.cask.tephra.TransactionSystemClient;
+import co.cask.tephra.TxConstants;
+import co.cask.tephra.distributed.PooledClientProvider;
+import co.cask.tephra.distributed.TransactionServiceClient;
+
 
 public class ConnectionQueryServicesImpl extends DelegateQueryServices 
implements ConnectionQueryServices {
 private static final Logger logger = 
LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
@@ -2370,14 +2370,16 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 // parts we haven't yet done).
 metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
 
PhoenixDatabaseMetaData.TRANSACTIONAL + " " + 
PBoolean.INSTANCE.getSqlTypeName());
-metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1, 
-
PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " " + 
PLong.INSTANCE.getSqlTypeName());
+// Drop old stats table so that new stats 
table is created
+metaConnection = 
dropStatsTable(metaConnection,
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1);
+metaConnection = 
addColumnsIfNotExists(metaConnection,
+
PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0,
+
PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " "
++ 
PLong.INSTANCE.getSqlTypeName());
 
setImmutableTableIndexesImmutable(metaConnection);
-   // Drop 
old stats table so that new stats table is created
-   
metaConnection = dropStatsTable(metaConnection,
-   
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
-   // 
Clear the server cache so the above changes make it over to any clients
-   // that 
already have cached data.
+// that already have cached data.
 

[12/50] [abbrv] phoenix git commit: PHOENIX-2602 Parser does not handle escaped LPAREN

2016-02-24 Thread maryannxue
PHOENIX-2602 Parser does not handle escaped LPAREN


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c485a40c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c485a40c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c485a40c

Branch: refs/heads/calcite
Commit: c485a40c766e34e3a5a443e60057cfaa1cb92869
Parents: d5518f0
Author: Thomas D'Silva 
Authored: Mon Feb 8 16:47:36 2016 -0800
Committer: Thomas D'Silva 
Committed: Tue Feb 9 15:08:37 2016 -0800

--
 .../phoenix/end2end/LikeExpressionIT.java   | 20 
 phoenix-core/src/main/antlr3/PhoenixSQL.g   |  6 +++---
 2 files changed, 23 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c485a40c/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java
index 1d93341..ecd1e8c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java
@@ -123,4 +123,24 @@ public class LikeExpressionIT extends 
BaseHBaseManagedTimeIT {
 
 conn.close();
 }
+
+@Test
+public void testLikeWithEscapenLParen() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+String ddl = "CREATE TABLE t (k VARCHAR, v VARCHAR, CONSTRAINT pk 
PRIMARY KEY (k))";
+conn.createStatement().execute(ddl);
+conn.createStatement().execute("UPSERT INTO t VALUES('aa','bb')");
+conn.createStatement().execute("UPSERT INTO t VALUES('a\\(d','xx')");
+conn.createStatement().execute("UPSERT INTO t VALUES('dd',null)");
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM t 
WHERE k not like '%\\(%'");
+assertTrue(rs.next());
+assertEquals("aa", rs.getString(1));
+assertEquals("bb", rs.getString(2));
+assertTrue(rs.next());
+assertEquals("dd", rs.getString(1));
+assertEquals(null, rs.getString(2));
+assertFalse(rs.next());
+}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c485a40c/phoenix-core/src/main/antlr3/PhoenixSQL.g
--
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g 
b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 23f7e8f..0be5717 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -1213,14 +1213,14 @@ DIGIT
 STRING_LITERAL
 @init{ StringBuilder sb = new StringBuilder(); }
 :   '\''
-( t=CHAR { sb.append(t.getText()); }
-| t=CHAR_ESC { sb.append(getText()); }
+( t=CHAR_ESC { sb.append(getText()); }
+| t=CHAR { sb.append(t.getText()); }
 )* '\'' { setText(sb.toString()); }
 ;
 
 fragment
 CHAR
-:   ( ~('\'' | '\\') )+
+:   ( ~('\'') )
 ;
 
 fragment



[36/50] [abbrv] phoenix git commit: PHOENIX-2676 Cannot support join operations in scans with limit (Maryann Xue)

2016-02-24 Thread maryannxue
PHOENIX-2676 Cannot support join operations in scans with limit (Maryann Xue)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c316d910
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c316d910
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c316d910

Branch: refs/heads/calcite
Commit: c316d91044e2e92030e3fd9e9b5fccbf5cfd5e17
Parents: 818683a
Author: James Taylor 
Authored: Wed Feb 17 11:53:11 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 17 12:19:57 2016 -0800

--
 .../coprocessor/HashJoinRegionScanner.java  | 72 +---
 1 file changed, 48 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c316d910/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
index 1e34d96..8f64b55 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.regionserver.NoLimitScannerContext;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -46,6 +47,7 @@ import org.apache.phoenix.schema.KeyValueSchema;
 import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.TupleUtil;
 
 public class HashJoinRegionScanner implements RegionScanner {
@@ -53,6 +55,7 @@ public class HashJoinRegionScanner implements RegionScanner {
 private final RegionScanner scanner;
 private final TupleProjector projector;
 private final HashJoinInfo joinInfo;
+private final RegionCoprocessorEnvironment env;
 private Queue resultQueue;
 private boolean hasMore;
 private long count;
@@ -64,6 +67,7 @@ public class HashJoinRegionScanner implements RegionScanner {
 
 @SuppressWarnings("unchecked")
 public HashJoinRegionScanner(RegionScanner scanner, TupleProjector 
projector, HashJoinInfo joinInfo, ImmutableBytesWritable tenantId, 
RegionCoprocessorEnvironment env) throws IOException {
+this.env = env;
 this.scanner = scanner;
 this.projector = projector;
 this.joinInfo = joinInfo;
@@ -250,25 +254,35 @@ public class HashJoinRegionScanner implements 
RegionScanner {
 
 @Override
 public boolean nextRaw(List result) throws IOException {
-while (shouldAdvance()) {
-hasMore = scanner.nextRaw(result);
-processResults(result, false);
-result.clear();
+try {
+while (shouldAdvance()) {
+hasMore = scanner.nextRaw(result);
+processResults(result, false);
+result.clear();
+}
+
+return nextInQueue(result);
+} catch (Throwable t) {
+
ServerUtil.throwIOException(env.getRegion().getRegionInfo().getRegionNameAsString(),
 t);
+return false; // impossible
 }
-
-return nextInQueue(result);
 }
 
 @Override
 public boolean nextRaw(List result, ScannerContext scannerContext)
 throws IOException {
-while (shouldAdvance()) {
-hasMore = scanner.nextRaw(result, scannerContext);
-processResults(result, false); // TODO fix honoring the limit
-result.clear();
+try {
+while (shouldAdvance()) {
+hasMore = scanner.nextRaw(result, scannerContext);
+processResults(result, scannerContext != 
NoLimitScannerContext.getInstance());
+result.clear();
+}
+
+return nextInQueue(result);
+} catch (Throwable t) {
+
ServerUtil.throwIOException(env.getRegion().getRegionInfo().getRegionNameAsString(),
 t);
+return false; // impossible
 }
-
-return nextInQueue(result);
 }
 
 @Override
@@ -283,24 +297,34 @@ public class HashJoinRegionScanner implements 
RegionScanner {
 
 @Override
 public boolean next(List result) throws IOException {
-while (shouldAdvance())

[46/50] [abbrv] phoenix git commit: PHOENIX-2666 addendum; correct HBase version.

2016-02-24 Thread maryannxue
PHOENIX-2666 addendum; correct HBase version.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d9d66ae8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d9d66ae8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d9d66ae8

Branch: refs/heads/calcite
Commit: d9d66ae807e6e69e43b36a0106d4f17bab1b0217
Parents: 0b1a180
Author: Lars Hofhansl 
Authored: Sun Feb 21 16:20:09 2016 -0800
Committer: Lars Hofhansl 
Committed: Sun Feb 21 16:20:09 2016 -0800

--
 .../main/java/org/apache/phoenix/iterate/BaseResultIterators.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d9d66ae8/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index fa09704..01b790a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -108,7 +108,7 @@ import com.google.common.collect.Lists;
 public abstract class BaseResultIterators extends ExplainTable implements 
ResultIterators {
private static final Logger logger = 
LoggerFactory.getLogger(BaseResultIterators.class);
 private static final int ESTIMATED_GUIDEPOSTS_PER_REGION = 20;
-private static final int MIN_SEEK_TO_COLUMN_VERSION = 
VersionUtil.encodeVersion("0", "94", "12");
+private static final int MIN_SEEK_TO_COLUMN_VERSION = 
VersionUtil.encodeVersion("0", "98", "12");
 
 private final List> scans;
 private final List splits;



[06/50] [abbrv] phoenix git commit: PHOENIX-2153 Fix a couple of Null pointer dereferences (Alicia Ying Shu)

2016-02-24 Thread maryannxue
PHOENIX-2153 Fix a couple of Null pointer dereferences (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e4d569cd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e4d569cd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e4d569cd

Branch: refs/heads/calcite
Commit: e4d569cd8bda5e7c828d3bae9b12165b0272b67a
Parents: fa58fc5
Author: Rajeshbabu Chintaguntla 
Authored: Mon Feb 8 16:06:41 2016 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Mon Feb 8 16:06:41 2016 +0530

--
 .../main/java/org/apache/phoenix/expression/InListExpression.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e4d569cd/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java
index b6d5a24..a4a9353 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/InListExpression.java
@@ -87,7 +87,7 @@ public class InListExpression extends BaseSingleExpression {
 }
 }
 if (coercedKeyExpressions.size() == 1) {
-throw sqlE;
+throw sqlE != null ? sqlE : new SQLException("Only one element in 
IN list");
 }
 if (coercedKeyExpressions.size() == 2 && addedNull) {
 return LiteralExpression.newConstant(null, PBoolean.INSTANCE, 
Determinism.ALWAYS);



[21/50] [abbrv] phoenix git commit: PHOENIX-2657 Transactionally deleted cells become visible after few hours

2016-02-24 Thread maryannxue
PHOENIX-2657 Transactionally deleted cells become visible after few hours


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/04c3819f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/04c3819f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/04c3819f

Branch: refs/heads/calcite
Commit: 04c3819f0ca353f09eeb231424c09dbd3d9d5bb3
Parents: edd94b2
Author: James Taylor 
Authored: Sat Feb 13 00:00:34 2016 -0800
Committer: James Taylor 
Committed: Sat Feb 13 00:02:18 2016 -0800

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/04c3819f/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 70b1a79..fe79577 100644
--- a/pom.xml
+++ b/pom.xml
@@ -112,7 +112,7 @@
 2.1.2
 1.6.0
 8.1.7.v20120910
-0.6.4
+0.6.5
 1.5.2
 2.10.4
 2.10



[08/50] [abbrv] phoenix git commit: PHOENIX-2653 Use data.tx.zookeeper.quorum property to initialize TransactionServiceClient falling back to HBase ZK quorum setting

2016-02-24 Thread maryannxue
PHOENIX-2653 Use data.tx.zookeeper.quorum property to initialize 
TransactionServiceClient falling back to HBase ZK quorum setting


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/39a982db
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/39a982db
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/39a982db

Branch: refs/heads/calcite
Commit: 39a982db98f52b33decb30ec51ca4b92a230abd2
Parents: b0122a5
Author: Thomas D'Silva 
Authored: Mon Feb 8 12:11:05 2016 -0800
Committer: Thomas D'Silva 
Committed: Mon Feb 8 12:27:58 2016 -0800

--
 .../phoenix/query/ConnectionQueryServicesImpl.java   | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/39a982db/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 8eba40b..f2a4512 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -187,6 +187,12 @@ import org.apache.twill.zookeeper.ZKClients;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import co.cask.tephra.TransactionSystemClient;
+import co.cask.tephra.TxConstants;
+import co.cask.tephra.distributed.PooledClientProvider;
+import co.cask.tephra.distributed.TransactionServiceClient;
+import co.cask.tephra.hbase98.coprocessor.TransactionProcessor;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Throwables;
@@ -346,7 +352,10 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 }
 
 private void initTxServiceClient() {
-String zkQuorumServersString = 
connectionInfo.getZookeeperQuorum()+":"+connectionInfo.getPort();
+String zkQuorumServersString = 
this.getProps().get(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM);
+if (zkQuorumServersString==null) {
+zkQuorumServersString = 
connectionInfo.getZookeeperQuorum()+":"+connectionInfo.getPort();
+}
 ZKClientService zkClientService = ZKClientServices.delegate(
   ZKClients.reWatchOnExpire(
 ZKClients.retryOnFailure(



[26/50] [abbrv] phoenix git commit: PHOENIX-2635 Partial index rebuild doesn't work for mutable data

2016-02-24 Thread maryannxue
PHOENIX-2635 Partial index rebuild doesn't work for mutable data


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/046bda34
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/046bda34
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/046bda34

Branch: refs/heads/calcite
Commit: 046bda34771aaec3befd4ad17024afc5af9b83ed
Parents: e2a6386
Author: James Taylor 
Authored: Mon Feb 15 00:33:05 2016 -0800
Committer: James Taylor 
Committed: Mon Feb 15 10:14:54 2016 -0800

--
 .../end2end/index/MutableIndexFailureIT.java| 379 +++
 .../end2end/index/ReadOnlyIndexFailureIT.java   |  75 ++--
 .../EndToEndCoveredColumnsIndexBuilderIT.java   |   2 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   5 +-
 .../coprocessor/MetaDataRegionObserver.java | 120 +-
 .../hbase/index/covered/LocalTableState.java|  19 +-
 .../phoenix/hbase/index/covered/TableState.java |   7 +-
 .../index/covered/data/LocalHBaseState.java |   6 +-
 .../hbase/index/covered/data/LocalTable.java|   9 +-
 .../example/CoveredColumnIndexCodec.java|   4 +-
 .../hbase/index/scanner/ScannerBuilder.java |   1 -
 .../apache/phoenix/index/IndexMaintainer.java   |   4 +-
 .../apache/phoenix/index/PhoenixIndexCodec.java |  12 +-
 .../phoenix/index/PhoenixIndexMetaData.java |  10 +-
 .../index/PhoenixTransactionalIndexer.java  |   2 +-
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  |  32 +-
 .../apache/phoenix/parse/NamedTableNode.java|   8 +
 .../phoenix/query/QueryServicesOptions.java |   2 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  34 +-
 .../org/apache/phoenix/util/PhoenixRuntime.java |   8 +-
 .../index/covered/TestLocalTableState.java  |  10 +-
 .../example/TestCoveredColumnIndexCodec.java|   4 +-
 22 files changed, 368 insertions(+), 385 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/046bda34/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 176c5a0..ebc6988 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -30,24 +30,17 @@ import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.List;
 import java.util.Map;
 import java.util.Properties;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseCluster;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
+import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.query.QueryServices;
@@ -61,7 +54,6 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -75,28 +67,29 @@ import com.google.common.collect.Maps;
  * For some reason dropping tables after running this test
  * fails unless it runs its own mini cluster. 
  * 
- * 
- * @since 2.1
  */
 
 @Category(NeedsOwnMiniClusterTest.class)
 @RunWith(Parameterized.class)
 public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
-private Timer scheduleTimer;
-
+public static volatile boolean FAIL_WRITE = false;
+public static final String INDEX_NAME = "IDX";
+
 private String tableName;
  

[18/50] [abbrv] phoenix git commit: PHOENIX-2659 Incorrect argument parsing and bad command for queryserver.py (Josh Elser)

2016-02-24 Thread maryannxue
PHOENIX-2659 Incorrect argument parsing and bad command for
 queryserver.py (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0c1fd3ad
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0c1fd3ad
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0c1fd3ad

Branch: refs/heads/calcite
Commit: 0c1fd3ad503207079bd20fc0d04c0409280c305b
Parents: 980eb36
Author: James Taylor 
Authored: Fri Feb 12 10:57:23 2016 -0800
Committer: James Taylor 
Committed: Fri Feb 12 10:57:23 2016 -0800

--
 bin/queryserver.py | 14 +++---
 1 file changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0c1fd3ad/bin/queryserver.py
--
diff --git a/bin/queryserver.py b/bin/queryserver.py
index d4228b3..c80e629 100755
--- a/bin/queryserver.py
+++ b/bin/queryserver.py
@@ -55,14 +55,19 @@ if len(args) > 1:
 command = 'stop'
 elif args[1] == 'makeWinServiceDesc':
 command = 'makeWinServiceDesc'
+
 if command:
+# Pull off queryserver.py and the command
 args = args[2:]
+else:
+# Just pull off queryserver.py
+args = args[1:]
 
 if os.name == 'nt':
-args = subprocess.list2cmdline(args[1:])
+args = subprocess.list2cmdline(args)
 else:
 import pipes# pipes module isn't available on Windows
-args = " ".join([pipes.quote(v) for v in args[1:]])
+args = " ".join([pipes.quote(v) for v in args])
 
 # HBase configuration folder path (where hbase-site.xml reside) for
 # HBase/Phoenix client side property override
@@ -119,7 +124,9 @@ else:
 
 #" -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y,suspend=n " 
+ \
 #" -XX:+UnlockCommercialFeatures -XX:+FlightRecorder 
-XX:FlightRecorderOptions=defaultrecording=true,dumponexit=true" + \
-java_cmd = '%(java)s $PHOENIX_OPTS -cp ' + hbase_config_path + os.pathsep + 
hadoop_config_path + os.pathsep + \
+
+# The command is run through subprocess so environment variables are 
automatically inherited
+java_cmd = '%(java)s -cp ' + hbase_config_path + os.pathsep + 
hadoop_config_path + os.pathsep + \
 phoenix_utils.phoenix_queryserver_jar + os.pathsep + 
phoenix_utils.phoenix_client_jar + \
 " -Dproc_phoenixserver" + \
 " -Dlog4j.configuration=file:" + os.path.join(phoenix_utils.current_dir, 
"log4j.properties") + \
@@ -201,5 +208,6 @@ elif command == 'stop':
 else:
 # run in the foreground using defaults from log4j.properties
 cmd = java_cmd % {'java': java, 'root_logger': 'INFO,console', 'log_dir': 
'.', 'log_file': 'psql.log'}
+# Because shell=True is not set, we don't have to alter the environment
 child = subprocess.Popen(cmd.split())
 sys.exit(child.wait())



[20/50] [abbrv] phoenix git commit: PHOENIX-2674 PhoenixMapReduceUtil#setInput doesn't honor condition clause (addendum)

2016-02-24 Thread maryannxue
PHOENIX-2674 PhoenixMapReduceUtil#setInput doesn't honor condition clause 
(addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/edd94b28
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/edd94b28
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/edd94b28

Branch: refs/heads/calcite
Commit: edd94b28ab46877aa15e94713274516619fa43b1
Parents: 8ece81b
Author: Jesse Yates 
Authored: Fri Feb 12 15:46:11 2016 -0800
Committer: Jesse Yates 
Committed: Fri Feb 12 15:46:51 2016 -0800

--
 .../org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/edd94b28/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java
index 125c6a8..98f0364 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java
@@ -58,8 +58,9 @@ public final class PhoenixMapReduceUtil {
  * @param inputQuery  Select query.
  */
 public static void setInput(final Job job, final Class inputClass, final String tableName, final String inputQuery) {
-  final Configuration configuration = setInput(job, inputClass, 
tableName);
-  PhoenixConfigurationUtil.setSchemaType(configuration, 
SchemaType.QUERY);
+final Configuration configuration = setInput(job, inputClass, 
tableName);
+PhoenixConfigurationUtil.setInputQuery(configuration, inputQuery);
+PhoenixConfigurationUtil.setSchemaType(configuration, 
SchemaType.QUERY);
  }
 
 private static Configuration setInput(final Job job, final Class inputClass, final String tableName){



[32/50] [abbrv] phoenix git commit: PHOENIX-2666 Performance regression: Aggregate query with filter on table with multiple column families

2016-02-24 Thread maryannxue
PHOENIX-2666 Performance regression: Aggregate query with filter on table with 
multiple column families


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/65445738
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/65445738
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/65445738

Branch: refs/heads/calcite
Commit: 6544573832324789f8cbd4531aa6614145c9eb7d
Parents: 4ccce0e
Author: James Taylor 
Authored: Tue Feb 16 17:15:57 2016 -0800
Committer: James Taylor 
Committed: Tue Feb 16 17:15:57 2016 -0800

--
 .../phoenix/iterate/BaseResultIterators.java| 34 ++--
 1 file changed, 17 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/65445738/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 2352e94..d8256d7 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -18,10 +18,10 @@
 package org.apache.phoenix.iterate;
 
 import static 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.EXPECTED_UPPER_REGION_KEY;
+import static 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.SCAN_ACTUAL_START_ROW;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_FAILED_QUERY_COUNTER;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_QUERY_TIMEOUT_COUNTER;
 import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY;
-import static 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.SCAN_ACTUAL_START_ROW;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInput;
@@ -179,8 +179,15 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 // Project the one column family. We must project a column 
family since it's possible
 // that there are other non declared column families that 
we need to ignore.
 
scan.addFamily(table.getColumnFamilies().get(0).getName().getBytes());
-} else {
-if (projector.projectEveryRow()) {
+} else if (projector.projectEveryRow()) {
+if (table.getViewType() == ViewType.MAPPED) {
+// Since we don't have the empty key value in MAPPED 
tables, 
+// we must select all CFs in HRS. However, only the
+// selected column values are returned back to client.
+for (PColumnFamily family : table.getColumnFamilies()) 
{
+scan.addFamily(family.getName().getBytes());
+}
+} else {
 byte[] ecf = SchemaUtil.getEmptyColumnFamily(table);
 // Project empty key value unless the column family 
containing it has
 // been projected in its entirety.
@@ -188,32 +195,25 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 scan.addColumn(ecf, 
QueryConstants.EMPTY_COLUMN_BYTES);
 }
 }
-}
-if (table.getViewType() == ViewType.MAPPED) {
-if (projector.projectEveryRow()) {
-// Since we don't have the empty key value in MAPPED 
tables, 
-// we must select all CFs in HRS. However, only the
-// selected column values are returned back to client.
-for (PColumnFamily family : table.getColumnFamilies()) 
{
-scan.addFamily(family.getName().getBytes());
-}
+} else {
+for (Pair whereColumn : 
context.getWhereConditionColumns()) {
+scan.addColumn(whereColumn.getFirst(), 
whereColumn.getSecond());
 }
-} 
+}
 }
 // Add FirstKeyOnlyFilter if there are no references to key value 
columns
 if (keyOnlyFilter) {
 ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter());
 }
-
-// TODO adding all CFs here is not correct. It should be done only 
after ColumnProjectionOptimization.
+
 if (perScanLimit != null) {
 ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit));
 

[02/50] [abbrv] phoenix git commit: PHOENIX-2169 Illegal data error on UPSERT SELECT and JOIN with salted tables (Ankit Singhal)

2016-02-24 Thread maryannxue
PHOENIX-2169 Illegal data error on UPSERT SELECT and JOIN with salted 
tables (Ankit Singhal)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/79724226
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/79724226
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/79724226

Branch: refs/heads/calcite
Commit: 79724226c737c905b748d73fe8f4d70dca743578
Parents: 12f6a6f
Author: Ankit Singhal 
Authored: Thu Feb 4 15:27:04 2016 +0530
Committer: Ankit Singhal 
Committed: Thu Feb 4 15:27:04 2016 +0530

--
 .../salted/SaltedTableUpsertSelectIT.java   | 57 
 .../expression/ProjectedColumnExpression.java   | 11 +++-
 .../visitor/CloneExpressionVisitor.java |  2 +-
 3 files changed, 68 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/79724226/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/SaltedTableUpsertSelectIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/SaltedTableUpsertSelectIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/SaltedTableUpsertSelectIT.java
index 0a11ec7..65eeb20 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/SaltedTableUpsertSelectIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/SaltedTableUpsertSelectIT.java
@@ -225,4 +225,61 @@ public class SaltedTableUpsertSelectIT extends 
BaseHBaseManagedTimeIT {
 conn.close();
 }
 }
+
+@Test
+public void testUpsertSelectWithJoinOnSaltedTables() throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.setAutoCommit(false);
+try {
+String ddl = "CREATE TABLE IF NOT EXISTS source1" +
+" (pk1 varchar NULL, pk2 varchar NULL, pk3 integer NOT 
NULL, col1 INTEGER" +
+" CONSTRAINT pk PRIMARY KEY (pk1, pk2, pk3)) 
SALT_BUCKETS=4";
+createTestTable(getUrl(), ddl);
+
+for (int i = 0; i < 1000; i++) {
+String upsert = "UPSERT INTO source1(pk1, pk2, pk3, col1) 
VALUES (?,?,?,?)";
+PreparedStatement stmt = conn.prepareStatement(upsert);
+stmt.setString(1, Integer.toString(i));
+stmt.setString(2, Integer.toString(i));
+stmt.setInt(3, i);
+stmt.setInt(4, i);
+stmt.execute();
+}
+conn.commit();
+
+String ddl2 = "CREATE TABLE IF NOT EXISTS source2" +
+" (pk1 varchar NULL, pk2 varchar NULL, pk3 integer NOT 
NULL, col1 INTEGER" +
+" CONSTRAINT pk PRIMARY KEY (pk1, pk2, pk3)) 
SALT_BUCKETS=4";
+createTestTable(getUrl(), ddl2);
+
+for (int i = 0; i < 1000; i++) {
+String upsert = "UPSERT INTO source2(pk1, pk2, pk3, col1) 
VALUES (?,?,?,?)";
+PreparedStatement stmt = conn.prepareStatement(upsert);
+stmt.setString(1, Integer.toString(i));
+stmt.setString(2, Integer.toString(i));
+stmt.setInt(3, i);
+stmt.setInt(4, i);
+stmt.execute();
+}
+conn.commit();
+
+String ddl3 = "CREATE TABLE IF NOT EXISTS dest" +
+" (pk1 varchar NULL, pk2 varchar NULL, pk3 integer NOT 
NULL, col1 INTEGER" +
+" CONSTRAINT pk PRIMARY KEY (pk1, pk2, pk3)) 
SALT_BUCKETS=4";
+createTestTable(getUrl(), ddl3);
+
+String query = "UPSERT INTO dest(pk1, pk2, pk3, col1) SELECT 
S1.pk1, S1.pk2, S2.pk3, S2.col1 FROM source1 AS S1 JOIN source2 AS S2 ON S1.pk1 
= S2.pk1 AND S1.pk2 = S2.pk2 AND S1.pk3 = S2.pk3";
+conn.createStatement().execute(query);
+conn.commit();
+
+query = "SELECT COUNT(*) FROM dest";
+PreparedStatement stmt = conn.prepareStatement(query);
+ResultSet rs = stmt.executeQuery();
+assertTrue(rs.next());
+assertEquals(1000, rs.getInt(1));
+} finally {
+conn.close();
+}
+}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/79724226/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java
index 89619bf..3a38dee 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/P

[17/50] [abbrv] phoenix git commit: PHOENIX-2657 Transactionally deleted cells become visible after few hours

2016-02-24 Thread maryannxue
PHOENIX-2657 Transactionally deleted cells become visible after few hours


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/980eb36e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/980eb36e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/980eb36e

Branch: refs/heads/calcite
Commit: 980eb36e251e917343d1376b2ac6f8c57d223c35
Parents: 0c21539
Author: James Taylor 
Authored: Fri Feb 12 09:38:43 2016 -0800
Committer: James Taylor 
Committed: Fri Feb 12 09:38:43 2016 -0800

--
 .../apache/phoenix/filter/SkipScanFilter.java   | 47 +---
 .../phoenix/index/PhoenixIndexBuilder.java  |  4 +-
 .../index/PhoenixTransactionalIndexer.java  |  5 ++-
 3 files changed, 38 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/980eb36e/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java 
b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
index 00320ce..c966d91 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
@@ -43,8 +43,6 @@ import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ScanUtil.BytesComparator;
 import org.apache.phoenix.util.SchemaUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Objects;
 import com.google.common.collect.Lists;
@@ -63,8 +61,6 @@ import com.google.common.hash.Hashing;
  * @since 0.1
  */
 public class SkipScanFilter extends FilterBase implements Writable {
-private static final Logger logger = 
LoggerFactory.getLogger(SkipScanFilter.class);
-
 private enum Terminate {AT, AFTER};
 // Conjunctive normal form of or-ed ranges or point lookups
 private List> slots;
@@ -72,6 +68,7 @@ public class SkipScanFilter extends FilterBase implements 
Writable {
 private int[] slotSpan;
 // schema of the row key
 private RowKeySchema schema;
+private boolean includeMultipleVersions;
 // current position for each slot
 private int[] position;
 // buffer used for skip hint
@@ -94,19 +91,27 @@ public class SkipScanFilter extends FilterBase implements 
Writable {
 public SkipScanFilter() {
 }
 
+public SkipScanFilter(SkipScanFilter filter, boolean 
includeMultipleVersions) {
+this(filter.slots, filter.slotSpan, filter.schema, 
includeMultipleVersions);
+}
+
 public SkipScanFilter(List> slots, RowKeySchema schema) {
 this(slots, ScanUtil.getDefaultSlotSpans(slots.size()), schema);
 }
 
 public SkipScanFilter(List> slots, int[] slotSpan, 
RowKeySchema schema) {
-init(slots, slotSpan, schema);
+this(slots, slotSpan, schema, false);
+}
+
+private SkipScanFilter(List> slots, int[] slotSpan, 
RowKeySchema schema, boolean includeMultipleVersions) {
+init(slots, slotSpan, schema, includeMultipleVersions);
 }
 
 public void setOffset(int offset) {
 this.offset = offset;
 }
 
-private void init(List> slots, int[] slotSpan, RowKeySchema 
schema) {
+private void init(List> slots, int[] slotSpan, RowKeySchema 
schema, boolean includeMultipleVersions) {
 for (List ranges : slots) {
 if (ranges.isEmpty()) {
 throw new IllegalStateException();
@@ -117,9 +122,10 @@ public class SkipScanFilter extends FilterBase implements 
Writable {
 this.schema = schema;
 this.maxKeyLength = SchemaUtil.getMaxKeyLength(schema, slots);
 this.position = new int[slots.size()];
-startKey = new byte[maxKeyLength];
-endKey = new byte[maxKeyLength];
-endKeyLength = 0;
+this.startKey = new byte[maxKeyLength];
+this.endKey = new byte[maxKeyLength];
+this.endKeyLength = 0;
+this.includeMultipleVersions = includeMultipleVersions;
 }
 
 // Exposed for testing.
@@ -345,15 +351,20 @@ public class SkipScanFilter extends FilterBase implements 
Writable {
 return i;
 }
 
+private ReturnCode getIncludeReturnCode() {
+return includeMultipleVersions ? ReturnCode.INCLUDE : 
ReturnCode.INCLUDE_AND_NEXT_COL;
+}
+
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(
 value="QBA_QUESTIONABLE_BOOLEAN_ASSIGNMENT", 
 justification="Assignment designed to work this way.")
 private ReturnCode navigate(final byte[] currentKey, final int offset, 
final int length, Terminate terminate) {
 int nSlots = slots.size();
+
 // Fir

[33/50] [abbrv] phoenix git commit: PHOENIX-2680 stats table timestamp incorrectly used as table timestamp

2016-02-24 Thread maryannxue
PHOENIX-2680 stats table timestamp incorrectly used as table timestamp


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d8e5a73b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d8e5a73b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d8e5a73b

Branch: refs/heads/calcite
Commit: d8e5a73be52c798b525068382e0376cb2d79e372
Parents: 6544573
Author: James Taylor 
Authored: Tue Feb 16 17:17:54 2016 -0800
Committer: James Taylor 
Committed: Tue Feb 16 17:17:54 2016 -0800

--
 .../UngroupedAggregateRegionObserver.java   | 10 +-
 .../phoenix/schema/stats/StatisticsCollector.java   | 16 ++--
 2 files changed, 11 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d8e5a73b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 05cf08e..e000e25 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -80,7 +80,6 @@ import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.index.PhoenixIndexCodec;
 import org.apache.phoenix.join.HashJoinInfo;
 import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.ConstraintViolationException;
 import org.apache.phoenix.schema.PColumn;
@@ -610,20 +609,13 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 InternalScanner internalScanner = scanner;
 if (scanType.equals(ScanType.COMPACT_DROP_DELETES)) {
 try {
-boolean useCurrentTime = 
c.getEnvironment().getConfiguration().getBoolean(
-QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
-QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
 Connection conn = 
c.getEnvironment().getRegionServerServices().getConnection();
 Pair mergeRegions = null;
 if (store.hasReferences()) {
 mergeRegions = 
MetaTableAccessor.getRegionsFromMergeQualifier(conn,
 
c.getEnvironment().getRegion().getRegionInfo().getRegionName());
 }
-// Provides a means of clients controlling their timestamps to 
not use current time
-// when background tasks are updating stats. Instead we track 
the max timestamp of
-// the cells and use that.
-long clientTimeStamp = useCurrentTime ? 
TimeKeeper.SYSTEM.getCurrentTime()
-: StatisticsCollector.NO_TIMESTAMP;
+long clientTimeStamp = TimeKeeper.SYSTEM.getCurrentTime();
 StatisticsCollector stats = new 
StatisticsCollector(c.getEnvironment(), table.getNameAsString(),
 clientTimeStamp, store.getFamily().getName());
 internalScanner = 
stats.createCompactionScanner(c.getEnvironment(), store, scanner, mergeRegions);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d8e5a73b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
index 75e0aa0..1d0204f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
@@ -63,11 +63,6 @@ public class StatisticsCollector {
 protected StatisticsWriter statsTable;
 private Pair cachedGps = null;
 
-public StatisticsCollector(RegionCoprocessorEnvironment env, String 
tableName, long clientTimeStamp)
-throws IOException {
-this(env, tableName, clientTimeStamp, null, null, null);
-}
-
 public StatisticsCollector(RegionCoprocessorEnvironment env, String 
tableName, long clientTimeStamp,
 byte[] gp_width_bytes, byte[] gp_per_region_bytes) throws 
IOException {
 this(env, tableName, clientTimeStamp, null, gp_width_bytes, 
gp_per_region_bytes);
@@ -78,7 +73,7 @@ public class StatisticsCollector {
 this(env, tableName, clientTimeS

[37/50] [abbrv] phoenix git commit: PHOENIX-2688 Remove unused mergeRegions for statistics collection

2016-02-24 Thread maryannxue
PHOENIX-2688 Remove unused mergeRegions for statistics collection


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5127a656
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5127a656
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5127a656

Branch: refs/heads/calcite
Commit: 5127a6565ea286222ea0ebf9f61fd5737d5b8009
Parents: c316d91
Author: James Taylor 
Authored: Wed Feb 17 12:27:11 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 17 12:27:11 2016 -0800

--
 .../coprocessor/UngroupedAggregateRegionObserver.java |  9 +
 .../apache/phoenix/schema/stats/StatisticsCollector.java  | 10 --
 .../apache/phoenix/schema/stats/StatisticsScanner.java|  5 +
 3 files changed, 6 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5127a656/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index e000e25..d850eab 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -44,9 +44,7 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -609,16 +607,11 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 InternalScanner internalScanner = scanner;
 if (scanType.equals(ScanType.COMPACT_DROP_DELETES)) {
 try {
-Connection conn = 
c.getEnvironment().getRegionServerServices().getConnection();
 Pair mergeRegions = null;
-if (store.hasReferences()) {
-mergeRegions = 
MetaTableAccessor.getRegionsFromMergeQualifier(conn,
-
c.getEnvironment().getRegion().getRegionInfo().getRegionName());
-}
 long clientTimeStamp = TimeKeeper.SYSTEM.getCurrentTime();
 StatisticsCollector stats = new 
StatisticsCollector(c.getEnvironment(), table.getNameAsString(),
 clientTimeStamp, store.getFamily().getName());
-internalScanner = 
stats.createCompactionScanner(c.getEnvironment(), store, scanner, mergeRegions);
+internalScanner = 
stats.createCompactionScanner(c.getEnvironment(), store, scanner);
 } catch (IOException e) {
 // If we can't reach the stats table, don't interrupt the 
normal
 // compaction operation, just log a warning.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5127a656/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
index 1d0204f..185ceb8 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
@@ -24,7 +24,6 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -197,19 +196,18 @@ public class StatisticsCollector {
 }
 }
 
-public InternalScanner 
createCompactionScanner(RegionCoprocessorEnvironment env, Store store, 
InternalScanner s,
-Pair mergeRegions) throws IOException {
+public InternalScanner 
createCompactionScanner(RegionCoprocessorEnvironment env, Store store, 
InternalScanner s) throws IOException {
 // See if this is for Major compaction
 if (logger.isDebugEnabled()) {
 logger.debug("Compaction scanner created for stats");
 }
 ImmutableBytesPtr cfKey = new 
ImmutableBytesPtr(store.getFamily().

[45/50] [abbrv] phoenix git commit: PHOENIX-2697 Provide a SERIAL hint.

2016-02-24 Thread maryannxue
PHOENIX-2697 Provide a SERIAL hint.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0b1a180f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0b1a180f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0b1a180f

Branch: refs/heads/calcite
Commit: 0b1a180f1d9acc23cf58ecfc84a67aac110160cd
Parents: 61fa462
Author: Lars Hofhansl 
Authored: Sat Feb 20 19:36:35 2016 -0800
Committer: Lars Hofhansl 
Committed: Sat Feb 20 19:36:35 2016 -0800

--
 .../apache/phoenix/execute/AggregatePlan.java   | 16 ++-
 .../org/apache/phoenix/execute/ScanPlan.java| 14 -
 .../apache/phoenix/iterate/ExplainTable.java|  2 +-
 .../apache/phoenix/iterate/SerialIterators.java |  3 ++-
 .../java/org/apache/phoenix/parse/HintNode.java |  4 
 .../compile/StatementHintsCompilationTest.java  | 21 +++-
 6 files changed, 43 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0b1a180f/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
index 3de4e68..73a995c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
@@ -48,10 +48,13 @@ import org.apache.phoenix.iterate.ParallelIterators;
 import org.apache.phoenix.iterate.ParallelScanGrouper;
 import org.apache.phoenix.iterate.PeekingResultIterator;
 import org.apache.phoenix.iterate.ResultIterator;
+import org.apache.phoenix.iterate.ResultIterators;
 import org.apache.phoenix.iterate.SequenceResultIterator;
+import org.apache.phoenix.iterate.SerialIterators;
 import org.apache.phoenix.iterate.SpoolingResultIterator;
 import org.apache.phoenix.iterate.UngroupedAggregatingResultIterator;
 import org.apache.phoenix.parse.FilterableStatement;
+import org.apache.phoenix.parse.HintNode;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
@@ -194,16 +197,19 @@ public class AggregatePlan extends BaseQueryPlan {
 
context.getScan().setAttribute(BaseScannerRegionObserver.GROUP_BY_LIMIT, 
PInteger.INSTANCE.toBytes(limit));
 }
 }
-ParallelIterators parallelIterators = new ParallelIterators(this, 
null, wrapParallelIteratorFactory());
-splits = parallelIterators.getSplits();
-scans = parallelIterators.getScans();
+ResultIterators iterators = 
statement.getHint().hasHint(HintNode.Hint.SERIAL) ?
+new SerialIterators(this, null, wrapParallelIteratorFactory(), 
scanGrouper) :
+new ParallelIterators(this, null, 
wrapParallelIteratorFactory());
+
+splits = iterators.getSplits();
+scans = iterators.getScans();
 
 AggregatingResultIterator aggResultIterator;
 // No need to merge sort for ungrouped aggregation
 if (groupBy.isEmpty()) {
-aggResultIterator = new UngroupedAggregatingResultIterator(new 
ConcatResultIterator(parallelIterators), aggregators);
+aggResultIterator = new UngroupedAggregatingResultIterator(new 
ConcatResultIterator(iterators), aggregators);
 } else {
-aggResultIterator = new GroupedAggregatingResultIterator(new 
MergeSortRowKeyResultIterator(parallelIterators), aggregators);
+aggResultIterator = new GroupedAggregatingResultIterator(new 
MergeSortRowKeyResultIterator(iterators), aggregators);
 }
 
 if (having != null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0b1a180f/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
index f4c570c..d51e6c8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
@@ -46,6 +46,7 @@ import org.apache.phoenix.iterate.SequenceResultIterator;
 import org.apache.phoenix.iterate.SerialIterators;
 import org.apache.phoenix.iterate.SpoolingResultIterator;
 import org.apache.phoenix.parse.FilterableStatement;
+import org.apache.phoenix.parse.HintNode;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryConstants;
@@ -85,7 +86,7 @@ public class ScanPlan extends BaseQueryPlan {

[09/50] [abbrv] phoenix git commit: PHOENIX-2653 Use data.tx.zookeeper.quorum property to initialize TransactionServiceClient falling back to HBase ZK quorum setting (addendum)

2016-02-24 Thread maryannxue
PHOENIX-2653 Use data.tx.zookeeper.quorum property to initialize 
TransactionServiceClient falling back to HBase ZK quorum setting (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e5e9144f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e5e9144f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e5e9144f

Branch: refs/heads/calcite
Commit: e5e9144f4e98803902174858051be58e9edcca11
Parents: 39a982d
Author: Thomas D'Silva 
Authored: Mon Feb 8 12:37:14 2016 -0800
Committer: Thomas D'Silva 
Committed: Mon Feb 8 12:37:14 2016 -0800

--
 .../apache/phoenix/query/ConnectionQueryServicesImpl.java| 8 +---
 1 file changed, 1 insertion(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e5e9144f/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index f2a4512..ceb1bbb 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -191,7 +191,7 @@ import co.cask.tephra.TransactionSystemClient;
 import co.cask.tephra.TxConstants;
 import co.cask.tephra.distributed.PooledClientProvider;
 import co.cask.tephra.distributed.TransactionServiceClient;
-import co.cask.tephra.hbase98.coprocessor.TransactionProcessor;
+import co.cask.tephra.hbase11.coprocessor.TransactionProcessor;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
@@ -205,12 +205,6 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
-import co.cask.tephra.TransactionSystemClient;
-import co.cask.tephra.TxConstants;
-import co.cask.tephra.distributed.PooledClientProvider;
-import co.cask.tephra.distributed.TransactionServiceClient;
-import co.cask.tephra.hbase11.coprocessor.TransactionProcessor;
-
 
 public class ConnectionQueryServicesImpl extends DelegateQueryServices 
implements ConnectionQueryServices {
 private static final Logger logger = 
LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);



[07/50] [abbrv] phoenix git commit: PHOENIX-2605 Enhance IndexToolIT to test transactional tables

2016-02-24 Thread maryannxue
PHOENIX-2605 Enhance IndexToolIT to test transactional tables


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b0122a54
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b0122a54
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b0122a54

Branch: refs/heads/calcite
Commit: b0122a541325fd7e40e62e3602eb0ad748b94a4f
Parents: e4d569c
Author: Thomas D'Silva 
Authored: Fri Jan 29 14:10:11 2016 -0800
Committer: Thomas D'Silva 
Committed: Mon Feb 8 11:38:47 2016 -0800

--
 .../phoenix/end2end/ContextClassloaderIT.java   |   2 +-
 .../phoenix/end2end/CsvBulkLoadToolIT.java  |  87 ++
 .../org/apache/phoenix/end2end/IndexToolIT.java | 273 +++
 .../phoenix/end2end/MutableIndexToolIT.java | 128 +
 .../phoenix/end2end/UserDefinedFunctionsIT.java |   8 +-
 .../end2end/index/DropIndexDuringUpsertIT.java  |   2 +-
 .../index/MutableIndexReplicationIT.java|   2 +-
 .../example/EndToEndCoveredIndexingIT.java  |   5 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   7 +
 .../phoenix/mapreduce/PhoenixInputFormat.java   |  11 +-
 .../phoenix/mapreduce/index/IndexTool.java  |   9 +-
 .../index/PhoenixIndexImportDirectMapper.java   |   7 +-
 .../index/PhoenixIndexImportMapper.java |   6 +-
 .../util/PhoenixConfigurationUtil.java  |   6 +-
 .../apache/phoenix/schema/MetaDataClient.java   |   1 -
 .../java/org/apache/phoenix/query/BaseTest.java |  56 +++-
 16 files changed, 337 insertions(+), 273 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b0122a54/phoenix-core/src/it/java/org/apache/phoenix/end2end/ContextClassloaderIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ContextClassloaderIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ContextClassloaderIT.java
index 7d0e1da..4c67b32 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ContextClassloaderIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ContextClassloaderIT.java
@@ -62,7 +62,7 @@ public class ContextClassloaderIT  extends BaseTest {
 String clientPort = 
hbaseTestUtil.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
 String url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + 
JDBC_PROTOCOL_SEPARATOR + clientPort
 + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
-driver = initAndRegisterDriver(url, ReadOnlyProps.EMPTY_PROPS);
+driver = initAndRegisterTestDriver(url, ReadOnlyProps.EMPTY_PROPS);
 
 Connection conn = DriverManager.getConnection(url);
 Statement stmt = conn.createStatement();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b0122a54/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
index 6bc03bf..26ec889 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
@@ -17,7 +17,6 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
 import static org.apache.phoenix.query.QueryServices.DATE_FORMAT_ATTRIB;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -30,65 +29,37 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.Statement;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.mapred.FileAlreadyExistsException;
-import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.mapreduce.CsvBulkLoadTool;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.util.DateUtil;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.junit.AfterClass;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.junit.experimental.categories.Category;
 
-@Category(NeedsOwnMiniClusterTest.class)
-public class CsvBulkLoadToolIT {
+import com.google.common.collect.Maps;
+
+public class CsvBulkLoadToolIT extends BaseOwnClusterHBaseManagedTimeIT {
 
-// We use HBaseTestUtil because we need to start up a MapReduc

Apache-Phoenix | 4.x-HBase-1.0 | Build Successful

2016-02-24 Thread Apache Jenkins Server
4.x-HBase-1.0 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/4.x-HBase-1.0

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.x-HBase-1.0/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.x-HBase-1.0/lastCompletedBuild/testReport/

Changes
[jtaylor] PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7



Build times for last couple of runs. Latest build time is the rightmost | Legend blue: normal, red: test failure, gray: timeout


Apache-Phoenix | Master | Build Successful

2016-02-24 Thread Apache Jenkins Server
Master branch build status Successful
Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/master

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-master/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-master/lastCompletedBuild/testReport/

Changes
[jtaylor] PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7



Build times for last couple of runs. Latest build time is the rightmost | Legend blue: normal, red: test failure, gray: timeout


phoenix git commit: PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client and server when upgraded from 4.6

2016-02-24 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 1cf2518cc -> cc08d751a


PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client 
and server when upgraded from 4.6


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cc08d751
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cc08d751
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cc08d751

Branch: refs/heads/4.x-HBase-0.98
Commit: cc08d751a7620ad98261ab3f86c9b9760415cf8d
Parents: 1cf2518
Author: James Taylor 
Authored: Wed Feb 24 17:25:51 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 24 17:25:51 2016 -0800

--
 .../query/ConnectionQueryServicesImpl.java  | 65 
 .../stats/StatisticsCollectorFactory.java   | 17 -
 2 files changed, 68 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cc08d751/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 0f32adf..c8d10ed 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2363,24 +2363,24 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0, columnsToAdd);
 }
 if(currentServerSideTableTimeStamp < 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0) {
+// Drop old stats table so that new stats 
table is created
+metaConnection = 
dropStatsTable(metaConnection, 
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4);
 // Add these columns one at a time, each 
with different timestamps so that if folks have
 // run the upgrade code already for a 
snapshot, we'll still enter this block (and do the
 // parts we haven't yet done).
-metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4,
+metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3,
 
PhoenixDatabaseMetaData.TRANSACTIONAL + " " + 
PBoolean.INSTANCE.getSqlTypeName());
-// Drop old stats table so that new stats 
table is created
-metaConnection = 
dropStatsTable(metaConnection,
-
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3);
-metaConnection = 
addColumnsIfNotExists(metaConnection,
-
PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
-
PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " "
-+ 
PLong.INSTANCE.getSqlTypeName());
-metaConnection = 
setImmutableTableIndexesImmutable(metaConnection, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1);
-Properties props = 
PropertiesUtil.deepCopy(metaConnection.getClientInfo());
-
props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0));
-metaConnection = new 
PhoenixConnection(metaConnection, ConnectionQueryServicesImpl.this, props);
-// that already have cached data.
+
PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " " + 
PLong.INSTANCE.getSqlTypeName());
+metaConnection = 
setImmutableTableIndexesImmutable(metaConnection, 
+   

phoenix git commit: PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client and server when upgraded from 4.6

2016-02-24 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/master 1f7b47a5e -> a6de19bd2


PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client 
and server when upgraded from 4.6


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a6de19bd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a6de19bd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a6de19bd

Branch: refs/heads/master
Commit: a6de19bd2290d8242b002ff661ec7028577e3055
Parents: 1f7b47a
Author: James Taylor 
Authored: Wed Feb 24 17:25:51 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 24 17:28:26 2016 -0800

--
 .../query/ConnectionQueryServicesImpl.java  | 65 
 .../stats/StatisticsCollectorFactory.java   | 17 -
 2 files changed, 68 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a6de19bd/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index d27a4bc..37ebc78 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2367,24 +2367,24 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0, columnsToAdd);
 }
 if(currentServerSideTableTimeStamp < 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0) {
+// Drop old stats table so that new stats 
table is created
+metaConnection = 
dropStatsTable(metaConnection, 
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4);
 // Add these columns one at a time, each 
with different timestamps so that if folks have
 // run the upgrade code already for a 
snapshot, we'll still enter this block (and do the
 // parts we haven't yet done).
-metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4,
+metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3,
 
PhoenixDatabaseMetaData.TRANSACTIONAL + " " + 
PBoolean.INSTANCE.getSqlTypeName());
-// Drop old stats table so that new stats 
table is created
-metaConnection = 
dropStatsTable(metaConnection,
-
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3);
-metaConnection = 
addColumnsIfNotExists(metaConnection,
-
PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
-
PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " "
-+ 
PLong.INSTANCE.getSqlTypeName());
-metaConnection = 
setImmutableTableIndexesImmutable(metaConnection, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1);
-Properties props = 
PropertiesUtil.deepCopy(metaConnection.getClientInfo());
-
props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0));
-metaConnection = new 
PhoenixConnection(metaConnection, ConnectionQueryServicesImpl.this, props);
-// that already have cached data.
+
PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " " + 
PLong.INSTANCE.getSqlTypeName());
+metaConnection = 
setImmutableTableIndexesImmutable(metaConnection, 
+
MetaDa

phoenix git commit: PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client and server when upgraded from 4.6

2016-02-24 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.0 76f17a5aa -> f907e3401


PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client 
and server when upgraded from 4.6


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f907e340
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f907e340
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f907e340

Branch: refs/heads/4.x-HBase-1.0
Commit: f907e340169457ffa480569c63bae73efd8126d0
Parents: 76f17a5
Author: James Taylor 
Authored: Wed Feb 24 17:25:51 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 24 17:29:46 2016 -0800

--
 .../query/ConnectionQueryServicesImpl.java  | 65 
 .../stats/StatisticsCollectorFactory.java   | 17 -
 2 files changed, 68 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f907e340/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 1c2e26a..72fe43e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2363,24 +2363,24 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0, columnsToAdd);
 }
 if(currentServerSideTableTimeStamp < 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0) {
+// Drop old stats table so that new stats 
table is created
+metaConnection = 
dropStatsTable(metaConnection, 
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4);
 // Add these columns one at a time, each 
with different timestamps so that if folks have
 // run the upgrade code already for a 
snapshot, we'll still enter this block (and do the
 // parts we haven't yet done).
-metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4,
+metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
+
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3,
 
PhoenixDatabaseMetaData.TRANSACTIONAL + " " + 
PBoolean.INSTANCE.getSqlTypeName());
-// Drop old stats table so that new stats 
table is created
-metaConnection = 
dropStatsTable(metaConnection,
-
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3);
-metaConnection = 
addColumnsIfNotExists(metaConnection,
-
PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+metaConnection = 
addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
-
PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " "
-+ 
PLong.INSTANCE.getSqlTypeName());
-metaConnection = 
setImmutableTableIndexesImmutable(metaConnection, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1);
-Properties props = 
PropertiesUtil.deepCopy(metaConnection.getClientInfo());
-
props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0));
-metaConnection = new 
PhoenixConnection(metaConnection, ConnectionQueryServicesImpl.this, props);
-// that already have cached data.
+
PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " " + 
PLong.INSTANCE.getSqlTypeName());
+metaConnection = 
setImmutableTableIndexesImmutable(metaConnection, 
+ 

phoenix git commit: PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client and server when upgraded from 4.6

2016-02-24 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/master a6de19bd2 -> 29ca38b85


PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client 
and server when upgraded from 4.6


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/29ca38b8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/29ca38b8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/29ca38b8

Branch: refs/heads/master
Commit: 29ca38b85a5f2455a007404aaa72c104f20a5afe
Parents: a6de19b
Author: James Taylor 
Authored: Wed Feb 24 17:44:08 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 24 17:44:08 2016 -0800

--
 .../java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/29ca38b8/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 37ebc78..3327861 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2384,7 +2384,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 metaConnection = 
updateSystemCatalogTimestamp(metaConnection, 
 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
 
ConnectionQueryServicesImpl.this.removeTable(null, 
PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
-logger.warn("Update of SYSTEM.CATALOG 
complete");
+logger.info("Update of SYSTEM.CATALOG 
complete");

clearCache();
 }
 



phoenix git commit: PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client and server when upgraded from 4.6

2016-02-24 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 cc08d751a -> 2b386b2d3


PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client 
and server when upgraded from 4.6


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2b386b2d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2b386b2d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2b386b2d

Branch: refs/heads/4.x-HBase-0.98
Commit: 2b386b2d3eff2dc4921398906e43450a9c9e96cb
Parents: cc08d75
Author: James Taylor 
Authored: Wed Feb 24 17:44:08 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 24 17:44:42 2016 -0800

--
 .../java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2b386b2d/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index c8d10ed..26e5cd7 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2380,7 +2380,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 metaConnection = 
updateSystemCatalogTimestamp(metaConnection, 
 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
 
ConnectionQueryServicesImpl.this.removeTable(null, 
PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
-logger.warn("Update of SYSTEM.CATALOG 
complete");
+logger.info("Update of SYSTEM.CATALOG 
complete");

clearCache();
 }
 



phoenix git commit: PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client and server when upgraded from 4.6

2016-02-24 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.0 f907e3401 -> 9ed29e5dd


PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7 client 
and server when upgraded from 4.6


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9ed29e5d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9ed29e5d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9ed29e5d

Branch: refs/heads/4.x-HBase-1.0
Commit: 9ed29e5ddea54bb940a6d0c12d0e7f54647c1a37
Parents: f907e34
Author: James Taylor 
Authored: Wed Feb 24 17:44:08 2016 -0800
Committer: James Taylor 
Committed: Wed Feb 24 17:44:59 2016 -0800

--
 .../java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ed29e5d/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 72fe43e..4c42cf6 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2380,7 +2380,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 metaConnection = 
updateSystemCatalogTimestamp(metaConnection, 
 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
 
ConnectionQueryServicesImpl.this.removeTable(null, 
PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, 
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
-logger.warn("Update of SYSTEM.CATALOG 
complete");
+logger.info("Update of SYSTEM.CATALOG 
complete");

clearCache();
 }
 



Apache-Phoenix | 4.x-HBase-0.98 | Build Successful

2016-02-24 Thread Apache Jenkins Server
4.x-HBase-0.98 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/4.x-HBase-0.98

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.x-HBase-0.98/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.x-HBase-0.98/lastCompletedBuild/testReport/

Changes
[jtaylor] PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7

[jtaylor] PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7



Build times for last couple of runs. Latest build time is the rightmost | Legend blue: normal, red: test failure, gray: timeout


Apache-Phoenix | Master | Build Successful

2016-02-24 Thread Apache Jenkins Server
Master branch build status Successful
Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/master

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-master/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-master/lastCompletedBuild/testReport/

Changes
[jtaylor] PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7

[jtaylor] PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7



Build times for last couple of runs. Latest build time is the rightmost | Legend blue: normal, red: test failure, gray: timeout


Apache-Phoenix | 4.x-HBase-1.0 | Build Successful

2016-02-24 Thread Apache Jenkins Server
4.x-HBase-1.0 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/4.x-HBase-1.0

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.x-HBase-1.0/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.x-HBase-1.0/lastCompletedBuild/testReport/

Changes
[jtaylor] PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7

[jtaylor] PHOENIX-2713 Backward compatibility fails with CNF exception with 4.7



Build times for last couple of runs. Latest build time is the rightmost | Legend blue: normal, red: test failure, gray: timeout


[phoenix] Git Push Summary

2016-02-24 Thread mujtaba
Repository: phoenix
Updated Tags:  refs/tags/v4.7.0-HBase-0.98-rc3 [created] 6c8a16c01


[phoenix] Git Push Summary

2016-02-24 Thread mujtaba
Repository: phoenix
Updated Tags:  refs/tags/v4.7.0-HBase-1.1-rc3 [created] 903be79bc


[phoenix] Git Push Summary

2016-02-24 Thread mujtaba
Repository: phoenix
Updated Tags:  refs/tags/v4.7.0-HBase-1.0-rc3 [created] 2593e0669


svn commit: r12527 - in /dev/phoenix: phoenix-4.7.0-HBase-0.98-rc3/ phoenix-4.7.0-HBase-0.98-rc3/bin/ phoenix-4.7.0-HBase-0.98-rc3/src/ phoenix-4.7.0-HBase-1.0-rc3/ phoenix-4.7.0-HBase-1.0-rc3/bin/ ph

2016-02-24 Thread mujtaba
Author: mujtaba
Date: Thu Feb 25 06:32:57 2016
New Revision: 12527

Log:
Phoenix 4.7.0-rc3

Added:
dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/
dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/

dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz
   (with props)

dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz.asc

dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz.md5

dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz.sha
dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/src/

dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/src/phoenix-4.7.0-HBase-0.98-src.tar.gz
   (with props)

dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/src/phoenix-4.7.0-HBase-0.98-src.tar.gz.asc

dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/src/phoenix-4.7.0-HBase-0.98-src.tar.gz.md5

dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/src/phoenix-4.7.0-HBase-0.98-src.tar.gz.sha
dev/phoenix/phoenix-4.7.0-HBase-1.0-rc3/
dev/phoenix/phoenix-4.7.0-HBase-1.0-rc3/bin/

dev/phoenix/phoenix-4.7.0-HBase-1.0-rc3/bin/phoenix-4.7.0-HBase-1.0-bin.tar.gz  
 (with props)

dev/phoenix/phoenix-4.7.0-HBase-1.0-rc3/bin/phoenix-4.7.0-HBase-1.0-bin.tar.gz.asc

dev/phoenix/phoenix-4.7.0-HBase-1.0-rc3/bin/phoenix-4.7.0-HBase-1.0-bin.tar.gz.md5

dev/phoenix/phoenix-4.7.0-HBase-1.0-rc3/bin/phoenix-4.7.0-HBase-1.0-bin.tar.gz.sha
dev/phoenix/phoenix-4.7.0-HBase-1.0-rc3/src/

dev/phoenix/phoenix-4.7.0-HBase-1.0-rc3/src/phoenix-4.7.0-HBase-1.0-src.tar.gz  
 (with props)

dev/phoenix/phoenix-4.7.0-HBase-1.0-rc3/src/phoenix-4.7.0-HBase-1.0-src.tar.gz.asc

dev/phoenix/phoenix-4.7.0-HBase-1.0-rc3/src/phoenix-4.7.0-HBase-1.0-src.tar.gz.md5

dev/phoenix/phoenix-4.7.0-HBase-1.0-rc3/src/phoenix-4.7.0-HBase-1.0-src.tar.gz.sha
dev/phoenix/phoenix-4.7.0-HBase-1.1-rc3/
dev/phoenix/phoenix-4.7.0-HBase-1.1-rc3/bin/

dev/phoenix/phoenix-4.7.0-HBase-1.1-rc3/bin/phoenix-4.7.0-HBase-1.1-bin.tar.gz  
 (with props)

dev/phoenix/phoenix-4.7.0-HBase-1.1-rc3/bin/phoenix-4.7.0-HBase-1.1-bin.tar.gz.asc

dev/phoenix/phoenix-4.7.0-HBase-1.1-rc3/bin/phoenix-4.7.0-HBase-1.1-bin.tar.gz.md5

dev/phoenix/phoenix-4.7.0-HBase-1.1-rc3/bin/phoenix-4.7.0-HBase-1.1-bin.tar.gz.sha
dev/phoenix/phoenix-4.7.0-HBase-1.1-rc3/src/

dev/phoenix/phoenix-4.7.0-HBase-1.1-rc3/src/phoenix-4.7.0-HBase-1.1-src.tar.gz  
 (with props)

dev/phoenix/phoenix-4.7.0-HBase-1.1-rc3/src/phoenix-4.7.0-HBase-1.1-src.tar.gz.asc

dev/phoenix/phoenix-4.7.0-HBase-1.1-rc3/src/phoenix-4.7.0-HBase-1.1-src.tar.gz.md5

dev/phoenix/phoenix-4.7.0-HBase-1.1-rc3/src/phoenix-4.7.0-HBase-1.1-src.tar.gz.sha

Added: 
dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: 
dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz.asc
==
--- 
dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz.asc
 (added)
+++ 
dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz.asc
 Thu Feb 25 06:32:57 2016
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+iQIcBAABAgAGBQJWzp1UAAoJEDv8s5KUYReOlo4QAMoX/ob/xcdckjC/GqNatd7u
+4xAu72pJsqUXnUkyk4hD45TiV/XcWea1UddBuvnpF3uSj7jFFIheMlb9adXBPVOe
+7HfRretJFNXrmpr5SCNHcALR705MaPprje4fqMLQqO8rd4KgGp6Z5ZiR8CteQDv9
+fsF9A1lG+qOvWM6EDmFy3h9eB1RxB22HoCgrgFYjE9E3VMdpS4ezE5kJMZYqUgXR
+1gAiADKrZfyPGpKtH6xQbQf7clsQIo/+vVa37rqMXimJAm1T5G763Clh1YfniTdD
+pIRcsArIbEIPlYqGCt5/fJiAE4VU1hte2rgeSTEgBrvgfHnX5aA68c1i6wvOj6if
+ByRA98pLqHd1jtufjRTW01D+8IHYDMNQbD0WtxwuK0HMYHB4YFKgvYDVWVSssL8o
+h9oRxHAOIwgrfdPCEvxR2gfuMNeHxPNhumKZZIimhGzNYvOTnzGXZCe7QiLrFEfG
+zh54dU4UJ0exZt08B+WL8J6ViyLyVbmn723ofUFwUQg2FECVorsprZ41uUSA2D7P
+D5T8XbRAsv0I7+eA3XaqlhatBXmf6WNMDa8uG9UF8AIpO3QWK7qQ7/42jfLSzHgo
+mMpO5U9fRJVyXFdFN31rLuERv+yqUJE73OInj1iarWfkW3Kjy+MSowHfs8inaj93
+XMS+tlaJD+//DkjQd886
+=wSR0
+-END PGP SIGNATURE-

Added: 
dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz.md5
==
--- 
dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz.md5
 (added)
+++ 
dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz.md5
 Thu Feb 25 06:32:57 2016
@@ -0,0 +1 @@
+26eb2ce7089f217fa330aba77ea79001 *phoenix-4.7.0-HBase-0.98-bin.tar.gz

Added: 
dev/phoenix/phoenix-4.7.0-HBase-0.98-rc3/bin/phoenix-4.7.0-HBase-0.98-bin.tar.gz.sha
==