phoenix git commit: PHOENIX-4616 Move join query optimization out from QueryCompiler into QueryOptimizer (addendum)

2018-04-05 Thread maryannxue
Repository: phoenix
Updated Branches:
  refs/heads/master 49fca494b -> 0b1b219ef


PHOENIX-4616 Move join query optimization out from QueryCompiler into 
QueryOptimizer (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0b1b219e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0b1b219e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0b1b219e

Branch: refs/heads/master
Commit: 0b1b219ef0e803d7ff254408c24b4bb67a5d88f9
Parents: 49fca49
Author: maryannxue 
Authored: Thu Apr 5 19:33:53 2018 -0700
Committer: maryannxue 
Committed: Thu Apr 5 19:33:53 2018 -0700

--
 .../main/java/org/apache/phoenix/optimize/QueryOptimizer.java   | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0b1b219e/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index 3a2d11e..6d668cc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -126,7 +126,10 @@ public class QueryOptimizer {
 }
 
 SelectStatement select = (SelectStatement) dataPlan.getStatement();
-if (!select.isUnion() && !select.isJoin() && 
select.getInnerSelectStatement() == null) {
+if (!select.isUnion()
+&& !select.isJoin()
+&& select.getInnerSelectStatement() == null
+&& (select.getWhere() == null || 
!select.getWhere().hasSubquery())) {
 return getApplicablePlansForSingleFlatQuery(dataPlan, statement, 
targetColumns, parallelIteratorFactory, stopAtBestPlan);
 }
 



phoenix git commit: PHOENIX-4530 Do not collect delete markers during major compaction of table with disabled mutable indexes

2018-04-05 Thread vincentpoon
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 3c1b3b547 -> 81f7b6713


PHOENIX-4530 Do not collect delete markers during major compaction of table 
with disabled mutable indexes


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/81f7b671
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/81f7b671
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/81f7b671

Branch: refs/heads/5.x-HBase-2.0
Commit: 81f7b67134285a3649809a9bcc4d4e9a19f9f3cb
Parents: 3c1b3b5
Author: Vincent Poon 
Authored: Wed Apr 4 13:28:23 2018 -0700
Committer: Vincent Poon 
Committed: Thu Apr 5 17:49:45 2018 -0700

--
 .../PartialScannerResultsDisabledIT.java|   2 +-
 .../UngroupedAggregateRegionObserverIT.java | 171 ---
 .../phoenix/end2end/index/MutableIndexIT.java   |  58 ++-
 .../end2end/index/PartialIndexRebuilderIT.java  |   2 +-
 .../UngroupedAggregateRegionObserver.java   | 125 +-
 .../java/org/apache/phoenix/util/TestUtil.java  |  19 +++
 6 files changed, 120 insertions(+), 257 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/81f7b671/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
index 817b0bd..59471dd 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
@@ -151,7 +151,7 @@ public class PartialScannerResultsDisabledIT extends 
ParallelStatsDisabledIT {
 return RandomStringUtils.randomAlphabetic(length);
 }
 
-private void writeSingleBatch(Connection connection, int batchSize, int 
numBatches, String tableName) throws Exception {
+public static void writeSingleBatch(Connection connection, int batchSize, 
int numBatches, String tableName) throws Exception {
 for (int j = 0; j < numBatches; j++) {
 try (PreparedStatement statement =
 
connection.prepareStatement(String.format(UPSERT_INTO_DATA_TABLE, tableName))) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/81f7b671/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java
deleted file mode 100644
index 0ae1bb5..0000000
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.never;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.log4j.Appender;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.spi.LoggingEvent;
-import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.schema.PIndexState;
-import org.apache.phoenix.util.EnvironmentEdgeManager;
-import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.SchemaUtil;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.Before;
-import org.junit.Test;
-import 

Build failed in Jenkins: Phoenix | Master #1978

2018-04-05 Thread Apache Jenkins Server
See 


Changes:

[maryannxue] PHOENIX-4616 Move join query optimization out from QueryCompiler 
into

--
[...truncated 48.56 KB...]
[INFO] Running org.apache.phoenix.compile.WhereOptimizerTest
[INFO] Tests run: 32, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.732 s 
- in org.apache.phoenix.compile.QueryMetaDataTest
[INFO] Running org.apache.phoenix.compile.TenantSpecificViewIndexCompileTest
[INFO] Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.563 s 
- in org.apache.phoenix.compile.HavingCompilerTest
[INFO] Running org.apache.phoenix.hbase.index.parallel.TestThreadPoolManager
[INFO] Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.022 s 
- in org.apache.phoenix.hbase.index.parallel.TestThreadPoolManager
[INFO] Running org.apache.phoenix.hbase.index.parallel.TestThreadPoolBuilder
[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.003 s 
- in org.apache.phoenix.hbase.index.parallel.TestThreadPoolBuilder
[INFO] Running 
org.apache.phoenix.hbase.index.covered.TestCoveredColumnIndexCodec
[INFO] Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.05 s - 
in org.apache.phoenix.hbase.index.covered.TestCoveredColumnIndexCodec
[INFO] Running 
org.apache.phoenix.hbase.index.covered.filter.TestNewerTimestampFilter
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.003 s 
- in org.apache.phoenix.hbase.index.covered.filter.TestNewerTimestampFilter
[INFO] Running 
org.apache.phoenix.hbase.index.covered.filter.TestApplyAndFilterDeletesFilter
[INFO] Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.006 s 
- in 
org.apache.phoenix.hbase.index.covered.filter.TestApplyAndFilterDeletesFilter
[INFO] Running 
org.apache.phoenix.hbase.index.covered.update.TestIndexUpdateManager
[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.305 s 
- in org.apache.phoenix.hbase.index.covered.update.TestIndexUpdateManager
[INFO] Running org.apache.phoenix.hbase.index.covered.TestColumnTracker
[INFO] Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.003 s 
- in org.apache.phoenix.hbase.index.covered.TestColumnTracker
[INFO] Running org.apache.phoenix.hbase.index.covered.data.TestIndexMemStore
[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.003 s 
- in org.apache.phoenix.hbase.index.covered.data.TestIndexMemStore
[INFO] Running org.apache.phoenix.hbase.index.covered.data.TestLocalTable
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.003 s 
- in org.apache.phoenix.hbase.index.covered.data.TestLocalTable
[INFO] Running org.apache.phoenix.hbase.index.covered.CoveredColumnsTest
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.001 s 
- in org.apache.phoenix.hbase.index.covered.CoveredColumnsTest
[INFO] Running 
org.apache.phoenix.hbase.index.covered.TestCoveredIndexSpecifierBuilder
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.004 s 
- in org.apache.phoenix.hbase.index.covered.TestCoveredIndexSpecifierBuilder
[INFO] Running org.apache.phoenix.hbase.index.covered.NonTxIndexBuilderTest
[INFO] Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.862 s 
- in org.apache.phoenix.compile.TenantSpecificViewIndexCompileTest
[INFO] Running org.apache.phoenix.hbase.index.covered.LocalTableStateTest
[INFO] Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.532 s 
- in org.apache.phoenix.query.KeyRangeMoreTest
[INFO] Running org.apache.phoenix.hbase.index.util.TestIndexManagementUtil
[INFO] Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.021 s 
- in org.apache.phoenix.hbase.index.util.TestIndexManagementUtil
[INFO] Running org.apache.phoenix.hbase.index.write.TestWALRecoveryCaching
[WARNING] Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.009 
s - in org.apache.phoenix.hbase.index.write.TestWALRecoveryCaching
[INFO] Running 
org.apache.phoenix.hbase.index.write.TestParalleWriterIndexCommitter
[INFO] Tests run: 110, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.258 
s - in org.apache.phoenix.compile.WhereOptimizerTest
[INFO] Running org.apache.phoenix.hbase.index.write.TestParalleIndexWriter
[INFO] Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.413 s 
- in org.apache.phoenix.hbase.index.covered.LocalTableStateTest
[INFO] Running 
org.apache.phoenix.hbase.index.write.recovery.TestPerRegionIndexWriteCache
[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.232 s 
- in org.apache.phoenix.hbase.index.write.TestParalleIndexWriter
[INFO] Running org.apache.phoenix.hbase.index.write.TestIndexWriter
[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.346 s 
- in org.apache.phoenix.hbase.index.write.TestParalleWriterIndexCommitter
[INFO] Running 

phoenix git commit: PHOENIX-4616 Move join query optimization out from QueryCompiler into QueryOptimizer (addendum)

2018-04-05 Thread maryannxue
Repository: phoenix
Updated Branches:
  refs/heads/master 701c447d3 -> 49fca494b


PHOENIX-4616 Move join query optimization out from QueryCompiler into 
QueryOptimizer (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/49fca494
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/49fca494
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/49fca494

Branch: refs/heads/master
Commit: 49fca494bf9e13918db558e8276676e3dfda9d74
Parents: 701c447
Author: maryannxue 
Authored: Thu Apr 5 17:38:30 2018 -0700
Committer: maryannxue 
Committed: Thu Apr 5 17:38:30 2018 -0700

--
 .../java/org/apache/phoenix/optimize/QueryOptimizer.java| 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/49fca494/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index 31f5c34..3a2d11e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -38,7 +38,6 @@ import org.apache.phoenix.compile.SequenceManager;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.compile.StatementNormalizer;
 import org.apache.phoenix.compile.SubqueryRewriter;
-import org.apache.phoenix.execute.BaseQueryPlan;
 import org.apache.phoenix.iterate.ParallelIteratorFactory;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
@@ -126,11 +125,11 @@ public class QueryOptimizer {
 return Collections.singletonList(dataPlan);
 }
 
-if (dataPlan instanceof BaseQueryPlan) {
-return getApplicablePlans((BaseQueryPlan) dataPlan, statement, 
targetColumns, parallelIteratorFactory, stopAtBestPlan);
+SelectStatement select = (SelectStatement) dataPlan.getStatement();
+if (!select.isUnion() && !select.isJoin() && 
select.getInnerSelectStatement() == null) {
+return getApplicablePlansForSingleFlatQuery(dataPlan, statement, 
targetColumns, parallelIteratorFactory, stopAtBestPlan);
 }
 
-SelectStatement select = (SelectStatement) dataPlan.getStatement();
 ColumnResolver resolver = FromCompiler.getResolverForQuery(select, 
statement.getConnection());
 Map dataPlans = null;
 
@@ -187,7 +186,7 @@ public class QueryOptimizer {
 return Collections.singletonList(compiler.compile());
 }
 
-private List getApplicablePlans(BaseQueryPlan dataPlan, 
PhoenixStatement statement, List targetColumns, 
ParallelIteratorFactory parallelIteratorFactory, boolean stopAtBestPlan) throws 
SQLException {
+private List getApplicablePlansForSingleFlatQuery(QueryPlan 
dataPlan, PhoenixStatement statement, List targetColumns, 
ParallelIteratorFactory parallelIteratorFactory, boolean stopAtBestPlan) throws 
SQLException {
 SelectStatement select = (SelectStatement)dataPlan.getStatement();
 // Exit early if we have a point lookup as we can't get better than 
that
 if (dataPlan.getContext().getScanRanges().isPointLookup() && 
stopAtBestPlan) {



Build failed in Jenkins: Phoenix-4.x-HBase-1.2 #309

2018-04-05 Thread Apache Jenkins Server
See 


Changes:

[vincentpoon] Revert "PHOENIX-4682 UngroupedAggregateRegionObserver

[vincentpoon] PHOENIX-4682 UngroupedAggregateRegionObserver 
preCompactScannerOpen hook

--
[...truncated 108.91 KB...]
[INFO] Running org.apache.phoenix.replication.SystemCatalogWALEntryFilterIT
[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.107 s 
- in org.apache.phoenix.replication.SystemCatalogWALEntryFilterIT
[INFO] Running org.apache.phoenix.rpc.UpdateCacheIT
[INFO] Tests run: 34, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 319.272 
s - in org.apache.phoenix.end2end.join.SortMergeJoinGlobalIndexIT
[INFO] Tests run: 9, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 49.169 s 
- in org.apache.phoenix.iterate.RoundRobinResultIteratorIT
[INFO] Running org.apache.phoenix.trace.PhoenixTracingEndToEndIT
[INFO] Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.715 s 
- in org.apache.phoenix.rpc.UpdateCacheIT
[INFO] Running org.apache.phoenix.tx.FlappingTransactionIT
[INFO] Running org.apache.phoenix.trace.PhoenixTableMetricsWriterIT
[INFO] Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.315 s 
- in org.apache.phoenix.tx.FlappingTransactionIT
[INFO] Running org.apache.phoenix.tx.ParameterizedTransactionIT
[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 12.307 s 
- in org.apache.phoenix.trace.PhoenixTableMetricsWriterIT
[INFO] Running org.apache.phoenix.tx.TransactionIT
[INFO] Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 160.004 
s - in org.apache.phoenix.end2end.join.SubqueryUsingSortMergeJoinIT
[INFO] Running org.apache.phoenix.tx.TxCheckpointIT
[INFO] Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 90.299 s 
- in org.apache.phoenix.trace.PhoenixTracingEndToEndIT
[INFO] Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 54.283 s 
- in org.apache.phoenix.tx.TransactionIT
[INFO] Tests run: 24, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 229.027 
s - in org.apache.phoenix.end2end.join.SubqueryIT
[INFO] Running org.apache.phoenix.util.IndexScrutinyIT
[INFO] Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 17.788 s 
- in org.apache.phoenix.util.IndexScrutinyIT
[INFO] Tests run: 34, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 581.646 
s - in org.apache.phoenix.end2end.join.HashJoinLocalIndexIT
[INFO] Tests run: 34, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 584.507 
s - in org.apache.phoenix.end2end.join.SortMergeJoinLocalIndexIT
[WARNING] Tests run: 52, Failures: 0, Errors: 0, Skipped: 4, Time elapsed: 
214.284 s - in org.apache.phoenix.tx.ParameterizedTransactionIT
[INFO] Tests run: 40, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 288.827 
s - in org.apache.phoenix.tx.TxCheckpointIT
[INFO] 
[INFO] Results:
[INFO] 
[WARNING] Tests run: 3400, Failures: 0, Errors: 0, Skipped: 7
[INFO] 
[INFO] 
[INFO] --- maven-failsafe-plugin:2.20:integration-test (HBaseManagedTimeTests) 
@ phoenix-core ---
[INFO] 
[INFO] ---
[INFO]  T E S T S
[INFO] ---
[INFO] 
[INFO] Results:
[INFO] 
[INFO] Tests run: 0, Failures: 0, Errors: 0, Skipped: 0
[INFO] 
[INFO] 
[INFO] --- maven-failsafe-plugin:2.20:integration-test 
(NeedTheirOwnClusterTests) @ phoenix-core ---
[INFO] 
[INFO] ---
[INFO]  T E S T S
[INFO] ---
[INFO] Running 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
[INFO] Running org.apache.phoenix.end2end.ChangePermissionsIT
[INFO] Running org.apache.phoenix.end2end.ConnectionUtilIT
[INFO] Running 
org.apache.hadoop.hbase.regionserver.wal.WALRecoveryRegionPostOpenIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 36.042 s 
- in 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.76 s - 
in org.apache.hadoop.hbase.regionserver.wal.WALRecoveryRegionPostOpenIT
[INFO] Running 
org.apache.phoenix.end2end.ColumnEncodedImmutableNonTxStatsCollectorIT
[INFO] Running org.apache.phoenix.end2end.ColumnEncodedMutableTxStatsCollectorIT
[INFO] Running 
org.apache.phoenix.end2end.ColumnEncodedMutableNonTxStatsCollectorIT
[INFO] Running 
org.apache.phoenix.end2end.ColumnEncodedImmutableTxStatsCollectorIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.507 s 
- in org.apache.phoenix.end2end.ConnectionUtilIT
[INFO] Running org.apache.phoenix.end2end.ContextClassloaderIT
[INFO] Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.536 s 
- in org.apache.phoenix.end2end.ContextClassloaderIT
[INFO] Running org.apache.phoenix.end2end.CountDistinctCompressionIT

Build failed in Jenkins: Phoenix | Master #1977

2018-04-05 Thread Apache Jenkins Server
See 


Changes:

[vincentpoon] Revert "PHOENIX-4682 UngroupedAggregateRegionObserver

[vincentpoon] PHOENIX-4682 UngroupedAggregateRegionObserver 
preCompactScannerOpen hook

--
[...truncated 116.17 KB...]
[ERROR]   DefaultColumnValueIT.testDefaultIndexed:978
[ERROR]   RowValueConstructorIT.testRVCLastPkIsTable1stPkIndex:1584
[ERROR]   
IndexMetadataIT.testMutableTableOnlyHasPrimaryKeyIndex:623->helpTestTableOnlyHasPrimaryKeyIndex:662
[ERROR] Errors: 
[ERROR]   
OrderByIT.testOrderByReverseOptimizationWithNUllsLastBug3491:969->doTestOrderByReverseOptimizationWithNUllsLastBug3491:1017->assertResultSet:1185
 » SQLTimeout
[INFO] 
[ERROR] Tests run: 3401, Failures: 4, Errors: 1, Skipped: 7
[INFO] 
[INFO] 
[INFO] --- maven-failsafe-plugin:2.20:integration-test (HBaseManagedTimeTests) 
@ phoenix-core ---
[INFO] 
[INFO] ---
[INFO]  T E S T S
[INFO] ---
[INFO] 
[INFO] Results:
[INFO] 
[INFO] Tests run: 0, Failures: 0, Errors: 0, Skipped: 0
[INFO] 
[INFO] 
[INFO] --- maven-failsafe-plugin:2.20:integration-test 
(NeedTheirOwnClusterTests) @ phoenix-core ---
[INFO] 
[INFO] ---
[INFO]  T E S T S
[INFO] ---
[INFO] Running 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
[INFO] Running org.apache.phoenix.end2end.ChangePermissionsIT
[INFO] Running org.apache.phoenix.end2end.ConnectionUtilIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 28.816 s 
- in 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
[INFO] Running 
org.apache.hadoop.hbase.regionserver.wal.WALRecoveryRegionPostOpenIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.491 s 
- in org.apache.hadoop.hbase.regionserver.wal.WALRecoveryRegionPostOpenIT
[INFO] Running 
org.apache.phoenix.end2end.ColumnEncodedMutableNonTxStatsCollectorIT
[INFO] Running 
org.apache.phoenix.end2end.ColumnEncodedImmutableTxStatsCollectorIT
[INFO] Running 
org.apache.phoenix.end2end.ColumnEncodedImmutableNonTxStatsCollectorIT
[INFO] Running org.apache.phoenix.end2end.ColumnEncodedMutableTxStatsCollectorIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 26.104 s 
- in org.apache.phoenix.end2end.ConnectionUtilIT
[INFO] Running org.apache.phoenix.end2end.ContextClassloaderIT
[INFO] Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.39 s - 
in org.apache.phoenix.end2end.ContextClassloaderIT
[INFO] Running org.apache.phoenix.end2end.CostBasedDecisionIT
[INFO] Running org.apache.phoenix.end2end.CountDistinctCompressionIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.579 s 
- in org.apache.phoenix.end2end.CountDistinctCompressionIT
[INFO] Running org.apache.phoenix.end2end.CsvBulkLoadToolIT
[WARNING] Tests run: 26, Failures: 0, Errors: 0, Skipped: 4, Time elapsed: 
111.462 s - in 
org.apache.phoenix.end2end.ColumnEncodedMutableNonTxStatsCollectorIT
[WARNING] Tests run: 26, Failures: 0, Errors: 0, Skipped: 4, Time elapsed: 
109.939 s - in 
org.apache.phoenix.end2end.ColumnEncodedImmutableNonTxStatsCollectorIT
[WARNING] Tests run: 26, Failures: 0, Errors: 0, Skipped: 4, Time elapsed: 
114.974 s - in 
org.apache.phoenix.end2end.ColumnEncodedImmutableTxStatsCollectorIT
[INFO] Running org.apache.phoenix.end2end.DropSchemaIT
[WARNING] Tests run: 26, Failures: 0, Errors: 0, Skipped: 4, Time elapsed: 
114.019 s - in org.apache.phoenix.end2end.ColumnEncodedMutableTxStatsCollectorIT
[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 19.216 s 
- in org.apache.phoenix.end2end.DropSchemaIT
[INFO] Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 83.61 s 
- in org.apache.phoenix.end2end.CsvBulkLoadToolIT
[INFO] Running org.apache.phoenix.end2end.IndexScrutinyToolIT
[INFO] Running org.apache.phoenix.end2end.FlappingLocalIndexIT
[INFO] Running org.apache.phoenix.end2end.IndexToolForPartialBuildIT
[INFO] Running org.apache.phoenix.end2end.IndexExtendedIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.511 s 
- in org.apache.phoenix.end2end.IndexToolForPartialBuildIT
[INFO] Running 
org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 19.639 s 
- in org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
[INFO] Running org.apache.phoenix.end2end.IndexToolIT
[INFO] Running org.apache.phoenix.end2end.MigrateSystemTablesToSystemNamespaceIT
[INFO] Running org.apache.phoenix.end2end.LocalIndexSplitMergeIT
[INFO] Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 148.411 
s - in org.apache.phoenix.end2end.FlappingLocalIndexIT

[1/2] phoenix git commit: Revert "PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should not throw exceptions"

2018-04-05 Thread vincentpoon
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 14daededb -> 8b2ebe003


Revert "PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen 
hook should not throw exceptions"

This reverts commit 2da904ebcb84d03231cccae298d78b0add1012ba.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bbbfccc4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bbbfccc4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bbbfccc4

Branch: refs/heads/4.x-HBase-1.2
Commit: bbbfccc4de2f4cdf34391507a36c0624531a7d55
Parents: 14daede
Author: Vincent Poon 
Authored: Thu Apr 5 10:02:58 2018 -0700
Committer: Vincent Poon 
Committed: Thu Apr 5 11:51:13 2018 -0700

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 41 +-
 .../UngroupedAggregateRegionObserver.java   | 81 
 2 files changed, 35 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bbbfccc4/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index 7456ba6..efae15e 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,25 +41,22 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.end2end.PartialScannerResultsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -870,42 +867,6 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
-  // some tables (e.g. indexes on views) have UngroupedAgg coproc loaded, but 
don't have a
-  // corresponding row in syscat.  This tests that compaction isn't blocked
-  @Test(timeout=12)
-  public void testCompactNonPhoenixTable() throws Exception {
-  try (Connection conn = getConnection()) {
-  // create a vanilla HBase table (non-Phoenix)
-  String randomTable = generateUniqueName();
-  TableName hbaseTN = TableName.valueOf(randomTable);
-  byte[] famBytes = Bytes.toBytes("fam");
-  HTable hTable = getUtility().createTable(hbaseTN, famBytes);
-  TestUtil.addCoprocessor(conn, randomTable, 
UngroupedAggregateRegionObserver.class);
-  Put put = new Put(Bytes.toBytes("row"));
-  byte[] value = new byte[1];
-  Bytes.random(value);
-  put.add(famBytes, Bytes.toBytes("colQ"), value);
-  hTable.put(put);
-  hTable.flushCommits();
-
-  // major compaction shouldn't cause a timeout or RS abort
-  List regions = 
getUtility().getHBaseCluster().getRegions(hbaseTN);
-  HRegion hRegion = regions.get(0);
-  hRegion.flush(true);
-  HStore store = (HStore) hRegion.getStore(famBytes);
-  store.triggerMajorCompaction();
-  store.compactRecentForTestingAssumingDefaultPolicy(1);
-
-  // we should be able to compact syscat itself as well
-  regions = 
getUtility().getHBaseCluster().getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
-  hRegion = regions.get(0);
-  hRegion.flush(true);
-  store = (HStore) 
hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
-  store.triggerMajorCompaction();
-  

[2/2] phoenix git commit: PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should not throw exceptions

2018-04-05 Thread vincentpoon
PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should 
not throw exceptions


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8b2ebe00
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8b2ebe00
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8b2ebe00

Branch: refs/heads/4.x-HBase-1.2
Commit: 8b2ebe00379b1423ba56eb30a3260c50fcf12fe6
Parents: bbbfccc
Author: Vincent Poon 
Authored: Thu Apr 5 10:03:30 2018 -0700
Committer: Vincent Poon 
Committed: Thu Apr 5 11:51:18 2018 -0700

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 40 ++
 .../UngroupedAggregateRegionObserver.java   | 81 
 2 files changed, 87 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b2ebe00/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index efae15e..631f97f 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,15 +41,19 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.end2end.PartialScannerResultsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryConstants;
@@ -867,6 +871,42 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
+  // some tables (e.g. indexes on views) have UngroupedAgg coproc loaded, but 
don't have a
+  // corresponding row in syscat.  This tests that compaction isn't blocked
+  @Test(timeout=120000)
+  public void testCompactNonPhoenixTable() throws Exception {
+  try (Connection conn = getConnection()) {
+  // create a vanilla HBase table (non-Phoenix)
+  String randomTable = generateUniqueName();
+  TableName hbaseTN = TableName.valueOf(randomTable);
+  byte[] famBytes = Bytes.toBytes("fam");
+  HTable hTable = getUtility().createTable(hbaseTN, famBytes);
+  TestUtil.addCoprocessor(conn, randomTable, 
UngroupedAggregateRegionObserver.class);
+  Put put = new Put(Bytes.toBytes("row"));
+  byte[] value = new byte[1];
+  Bytes.random(value);
+  put.add(famBytes, Bytes.toBytes("colQ"), value);
+  hTable.put(put);
+  hTable.flushCommits();
+
+  // major compaction shouldn't cause a timeout or RS abort
+  List<HRegion> regions = getUtility().getHBaseCluster().getRegions(hbaseTN);
+  HRegion hRegion = regions.get(0);
+  hRegion.flush(true);
+  HStore store = (HStore) hRegion.getStore(famBytes);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+
+  // we should be able to compact syscat itself as well
+  regions = 
getUtility().getHBaseCluster().getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
+  hRegion = regions.get(0);
+  hRegion.flush(true);
+  store = (HStore) 
hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+  }
+  }
+
 private void upsertRow(String dml, Connection tenantConn, int i) throws 
SQLException {
 PreparedStatement stmt = tenantConn.prepareStatement(dml);
   stmt.setString(1, "00" + String.valueOf(i));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b2ebe00/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java

[1/2] phoenix git commit: Revert "PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should not throw exceptions"

2018-04-05 Thread vincentpoon
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 d9a38fb95 -> 995f417da


Revert "PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen 
hook should not throw exceptions"

This reverts commit 2da904ebcb84d03231cccae298d78b0add1012ba.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/31b6ccb8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/31b6ccb8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/31b6ccb8

Branch: refs/heads/4.x-HBase-1.3
Commit: 31b6ccb81e03f8739571ecc377399f5d14ab9a2e
Parents: d9a38fb
Author: Vincent Poon 
Authored: Thu Apr 5 10:02:58 2018 -0700
Committer: Vincent Poon 
Committed: Thu Apr 5 10:10:11 2018 -0700

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 41 +-
 .../UngroupedAggregateRegionObserver.java   | 81 
 2 files changed, 35 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/31b6ccb8/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index 7456ba6..efae15e 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,25 +41,22 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.end2end.PartialScannerResultsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -870,42 +867,6 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
-  // some tables (e.g. indexes on views) have UngroupedAgg coproc loaded, but 
don't have a
-  // corresponding row in syscat.  This tests that compaction isn't blocked
-  @Test(timeout=120000)
-  public void testCompactNonPhoenixTable() throws Exception {
-  try (Connection conn = getConnection()) {
-  // create a vanilla HBase table (non-Phoenix)
-  String randomTable = generateUniqueName();
-  TableName hbaseTN = TableName.valueOf(randomTable);
-  byte[] famBytes = Bytes.toBytes("fam");
-  HTable hTable = getUtility().createTable(hbaseTN, famBytes);
-  TestUtil.addCoprocessor(conn, randomTable, 
UngroupedAggregateRegionObserver.class);
-  Put put = new Put(Bytes.toBytes("row"));
-  byte[] value = new byte[1];
-  Bytes.random(value);
-  put.add(famBytes, Bytes.toBytes("colQ"), value);
-  hTable.put(put);
-  hTable.flushCommits();
-
-  // major compaction shouldn't cause a timeout or RS abort
-  List<HRegion> regions = getUtility().getHBaseCluster().getRegions(hbaseTN);
-  HRegion hRegion = regions.get(0);
-  hRegion.flush(true);
-  HStore store = (HStore) hRegion.getStore(famBytes);
-  store.triggerMajorCompaction();
-  store.compactRecentForTestingAssumingDefaultPolicy(1);
-
-  // we should be able to compact syscat itself as well
-  regions = 
getUtility().getHBaseCluster().getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
-  hRegion = regions.get(0);
-  hRegion.flush(true);
-  store = (HStore) 
hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
-  store.triggerMajorCompaction();
-  

[2/2] phoenix git commit: PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should not throw exceptions

2018-04-05 Thread vincentpoon
PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should 
not throw exceptions


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/995f417d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/995f417d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/995f417d

Branch: refs/heads/4.x-HBase-1.3
Commit: 995f417da7e31d7076ee1820ddb9ec794f219807
Parents: 31b6ccb
Author: Vincent Poon 
Authored: Thu Apr 5 10:03:30 2018 -0700
Committer: Vincent Poon 
Committed: Thu Apr 5 10:10:17 2018 -0700

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 40 ++
 .../UngroupedAggregateRegionObserver.java   | 81 
 2 files changed, 87 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/995f417d/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index efae15e..631f97f 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,15 +41,19 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.end2end.PartialScannerResultsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryConstants;
@@ -867,6 +871,42 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
+  // some tables (e.g. indexes on views) have UngroupedAgg coproc loaded, but 
don't have a
+  // corresponding row in syscat.  This tests that compaction isn't blocked
+  @Test(timeout=120000)
+  public void testCompactNonPhoenixTable() throws Exception {
+  try (Connection conn = getConnection()) {
+  // create a vanilla HBase table (non-Phoenix)
+  String randomTable = generateUniqueName();
+  TableName hbaseTN = TableName.valueOf(randomTable);
+  byte[] famBytes = Bytes.toBytes("fam");
+  HTable hTable = getUtility().createTable(hbaseTN, famBytes);
+  TestUtil.addCoprocessor(conn, randomTable, 
UngroupedAggregateRegionObserver.class);
+  Put put = new Put(Bytes.toBytes("row"));
+  byte[] value = new byte[1];
+  Bytes.random(value);
+  put.add(famBytes, Bytes.toBytes("colQ"), value);
+  hTable.put(put);
+  hTable.flushCommits();
+
+  // major compaction shouldn't cause a timeout or RS abort
+  List<HRegion> regions = getUtility().getHBaseCluster().getRegions(hbaseTN);
+  HRegion hRegion = regions.get(0);
+  hRegion.flush(true);
+  HStore store = (HStore) hRegion.getStore(famBytes);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+
+  // we should be able to compact syscat itself as well
+  regions = 
getUtility().getHBaseCluster().getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
+  hRegion = regions.get(0);
+  hRegion.flush(true);
+  store = (HStore) 
hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+  }
+  }
+
 private void upsertRow(String dml, Connection tenantConn, int i) throws 
SQLException {
 PreparedStatement stmt = tenantConn.prepareStatement(dml);
   stmt.setString(1, "00" + String.valueOf(i));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/995f417d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java

[1/2] phoenix git commit: Revert "PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should not throw exceptions"

2018-04-05 Thread vincentpoon
Repository: phoenix
Updated Branches:
  refs/heads/master 2da904ebc -> 701c447d3


Revert "PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen 
hook should not throw exceptions"

This reverts commit 2da904ebcb84d03231cccae298d78b0add1012ba.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/49610d18
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/49610d18
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/49610d18

Branch: refs/heads/master
Commit: 49610d188a34e078514cfc61560e2389933b80b0
Parents: 2da904e
Author: Vincent Poon 
Authored: Thu Apr 5 10:02:58 2018 -0700
Committer: Vincent Poon 
Committed: Thu Apr 5 10:02:58 2018 -0700

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 41 +-
 .../UngroupedAggregateRegionObserver.java   | 81 
 2 files changed, 35 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/49610d18/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index 7456ba6..efae15e 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,25 +41,22 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.end2end.PartialScannerResultsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -870,42 +867,6 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
-  // some tables (e.g. indexes on views) have UngroupedAgg coproc loaded, but 
don't have a
-  // corresponding row in syscat.  This tests that compaction isn't blocked
-  @Test(timeout=12)
-  public void testCompactNonPhoenixTable() throws Exception {
-  try (Connection conn = getConnection()) {
-  // create a vanilla HBase table (non-Phoenix)
-  String randomTable = generateUniqueName();
-  TableName hbaseTN = TableName.valueOf(randomTable);
-  byte[] famBytes = Bytes.toBytes("fam");
-  HTable hTable = getUtility().createTable(hbaseTN, famBytes);
-  TestUtil.addCoprocessor(conn, randomTable, 
UngroupedAggregateRegionObserver.class);
-  Put put = new Put(Bytes.toBytes("row"));
-  byte[] value = new byte[1];
-  Bytes.random(value);
-  put.add(famBytes, Bytes.toBytes("colQ"), value);
-  hTable.put(put);
-  hTable.flushCommits();
-
-  // major compaction shouldn't cause a timeout or RS abort
-  List<HRegion> regions = getUtility().getHBaseCluster().getRegions(hbaseTN);
-  HRegion hRegion = regions.get(0);
-  hRegion.flush(true);
-  HStore store = (HStore) hRegion.getStore(famBytes);
-  store.triggerMajorCompaction();
-  store.compactRecentForTestingAssumingDefaultPolicy(1);
-
-  // we should be able to compact syscat itself as well
-  regions = 
getUtility().getHBaseCluster().getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
-  hRegion = regions.get(0);
-  hRegion.flush(true);
-  store = (HStore) 
hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
-  store.triggerMajorCompaction();
-  

[2/2] phoenix git commit: PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should not throw exceptions

2018-04-05 Thread vincentpoon
PHOENIX-4682 UngroupedAggregateRegionObserver preCompactScannerOpen hook should 
not throw exceptions


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/701c447d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/701c447d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/701c447d

Branch: refs/heads/master
Commit: 701c447d366977eaa3d28d99940faf2bff958085
Parents: 49610d1
Author: Vincent Poon 
Authored: Thu Apr 5 10:03:30 2018 -0700
Committer: Vincent Poon 
Committed: Thu Apr 5 10:03:30 2018 -0700

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 40 ++
 .../UngroupedAggregateRegionObserver.java   | 81 
 2 files changed, 87 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/701c447d/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index efae15e..631f97f 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,15 +41,19 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.end2end.PartialScannerResultsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryConstants;
@@ -867,6 +871,42 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
+  // some tables (e.g. indexes on views) have UngroupedAgg coproc loaded, but 
don't have a
+  // corresponding row in syscat.  This tests that compaction isn't blocked
+  @Test(timeout=120000)
+  public void testCompactNonPhoenixTable() throws Exception {
+  try (Connection conn = getConnection()) {
+  // create a vanilla HBase table (non-Phoenix)
+  String randomTable = generateUniqueName();
+  TableName hbaseTN = TableName.valueOf(randomTable);
+  byte[] famBytes = Bytes.toBytes("fam");
+  HTable hTable = getUtility().createTable(hbaseTN, famBytes);
+  TestUtil.addCoprocessor(conn, randomTable, 
UngroupedAggregateRegionObserver.class);
+  Put put = new Put(Bytes.toBytes("row"));
+  byte[] value = new byte[1];
+  Bytes.random(value);
+  put.add(famBytes, Bytes.toBytes("colQ"), value);
+  hTable.put(put);
+  hTable.flushCommits();
+
+  // major compaction shouldn't cause a timeout or RS abort
+  List<HRegion> regions = getUtility().getHBaseCluster().getRegions(hbaseTN);
+  HRegion hRegion = regions.get(0);
+  hRegion.flush(true);
+  HStore store = (HStore) hRegion.getStore(famBytes);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+
+  // we should be able to compact syscat itself as well
+  regions = 
getUtility().getHBaseCluster().getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
+  hRegion = regions.get(0);
+  hRegion.flush(true);
+  store = (HStore) 
hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
+  store.triggerMajorCompaction();
+  store.compactRecentForTestingAssumingDefaultPolicy(1);
+  }
+  }
+
 private void upsertRow(String dml, Connection tenantConn, int i) throws 
SQLException {
 PreparedStatement stmt = tenantConn.prepareStatement(dml);
   stmt.setString(1, "00" + String.valueOf(i));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/701c447d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java

Build failed in Jenkins: Phoenix Compile Compatibility with HBase #597

2018-04-05 Thread Apache Jenkins Server
See 


--
[...truncated 39.68 KB...]
[ERROR] 
:[364,5]
 method does not override or implement a method from a supertype
[ERROR] 
:[370,5]
 method does not override or implement a method from a supertype
[ERROR] 
:[376,5]
 method does not override or implement a method from a supertype
[ERROR] 
:[382,5]
 method does not override or implement a method from a supertype
[ERROR] Failed to execute goal 
org.apache.maven.plugins:maven-compiler-plugin:3.0:compile (default-compile) on 
project phoenix-core: Compilation failure: Compilation failure: 
[ERROR] 
:[34,39]
 cannot find symbol
[ERROR]   symbol:   class MetricRegistry
[ERROR]   location: package org.apache.hadoop.hbase.metrics
[ERROR] 
:[144,16]
 cannot find symbol
[ERROR]   symbol:   class MetricRegistry
[ERROR]   location: class 
org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment
[ERROR] 
:[24,35]
 cannot find symbol
[ERROR]   symbol:   class DelegatingHBaseRpcController
[ERROR]   location: package org.apache.hadoop.hbase.ipc
[ERROR] 
:[25,35]
 cannot find symbol
[ERROR]   symbol:   class HBaseRpcController
[ERROR]   location: package org.apache.hadoop.hbase.ipc
[ERROR] 
:[37,37]
 cannot find symbol
[ERROR]   symbol: class DelegatingHBaseRpcController
[ERROR] 
:[56,38]
 cannot find symbol
[ERROR]   symbol:   class HBaseRpcController
[ERROR]   location: class 
org.apache.hadoop.hbase.ipc.controller.MetadataRpcController
[ERROR] 
:[26,35]
 cannot find symbol
[ERROR]   symbol:   class HBaseRpcController
[ERROR]   location: package org.apache.hadoop.hbase.ipc
[ERROR] 
:[40,12]
 cannot find symbol
[ERROR]   symbol:   class HBaseRpcController
[ERROR]   location: class 
org.apache.hadoop.hbase.ipc.controller.InterRegionServerMetadataRpcControllerFactory
[ERROR] 
:[46,12]
 cannot find symbol
[ERROR]   symbol:   class HBaseRpcController
[ERROR]   location: class 
org.apache.hadoop.hbase.ipc.controller.InterRegionServerMetadataRpcControllerFactory
[ERROR] 
:[52,12]
 cannot find symbol
[ERROR]   symbol:   class HBaseRpcController
[ERROR]   location: class 
org.apache.hadoop.hbase.ipc.controller.InterRegionServerMetadataRpcControllerFactory
[ERROR] 
:[57,46]
 cannot