See <https://builds.apache.org/job/Phoenix-4.8-HBase-1.1/58/changes>

Changes:

[elserj] PHOENIX-3505 Avoid NPE on close() in OrderedResultIterator
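
(A minimal sketch of the kind of null-guard on close() that the change title above describes; the class and field names here are illustrative assumptions, not the actual PHOENIX-3505 patch in OrderedResultIterator:)

    import java.sql.SQLException;

    // Illustrative only: close() tolerates a delegate that was never assigned,
    // e.g. when the query failed before the iterator was fully initialized.
    public class NullSafeCloser {
        private AutoCloseable delegate; // may still be null if setup failed early

        public void close() throws SQLException {
            if (delegate == null) {
                return; // nothing to release; avoids an NPE on close()
            }
            try {
                delegate.close();
            } catch (Exception e) {
                throw new SQLException(e);
            }
        }
    }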

------------------------------------------
[...truncated 1033 lines...]
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.doNonAtomicRegionMutation(RSRpcServices.java:662)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.multi(RSRpcServices.java:2046)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32393)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)
: 2 times, 
        at org.apache.hadoop.hbase.client.AsyncProcess$BatchErrors.makeException(AsyncProcess.java:228)
        at org.apache.hadoop.hbase.client.AsyncProcess$BatchErrors.access$1700(AsyncProcess.java:208)
        at org.apache.hadoop.hbase.client.AsyncProcess$AsyncRequestFutureImpl.getErrors(AsyncProcess.java:1599)
        at org.apache.hadoop.hbase.client.HTable.batch(HTable.java:936)
        at org.apache.hadoop.hbase.client.HTable.batch(HTable.java:950)
        at org.apache.hadoop.hbase.client.HTableWrapper.batch(HTableWrapper.java:255)
        at org.apache.phoenix.execute.DelegateHTable.batch(DelegateHTable.java:94)
        at org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter$1.call(ParallelWriterIndexCommitter.java:167)
        at org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter$1.call(ParallelWriterIndexCommitter.java:131)
        at java.util.concurrent.FutureTask.run(FutureTask.java:262)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        ... 1 more
: 1 time, 
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.helpTestWriteFailureDisablesIndex(MutableIndexFailureIT.java:225)
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.testWriteFailureDisablesIndex(MutableIndexFailureIT.java:127)

testWriteFailureDisablesIndex[transactional = false, localIndex = false, isNamespaceMapped = true](org.apache.phoenix.end2end.index.MutableIndexFailureIT)  Time elapsed: 25.666 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:393)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:202)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:620)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2420)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32385)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: java.util.concurrent.ExecutionException: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:393)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:202)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:620)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2420)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32385)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:393)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:202)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:620)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2420)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32385)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:393)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:202)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:620)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2420)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32385)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.DoNotRetryIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:393)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:202)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:620)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2420)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32385)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:393)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:202)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:620)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2420)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32385)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)


testWriteFailureDisablesIndex[transactional = false, localIndex = false, isNamespaceMapped = false](org.apache.phoenix.end2end.index.MutableIndexFailureIT)  Time elapsed: 18.015 sec  <<< ERROR!
org.apache.phoenix.schema.TableAlreadyExistsException: ERROR 1013 (42M04): Table already exists. tableName=TEST.IDX_2
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.helpTestWriteFailureDisablesIndex(MutableIndexFailureIT.java:159)
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.testWriteFailureDisablesIndex(MutableIndexFailureIT.java:127)

testWriteFailureDisablesIndex[transactional = true, localIndex = false, isNamespaceMapped = true](org.apache.phoenix.end2end.index.MutableIndexFailureIT)  Time elapsed: 30.703 sec  <<< FAILURE!
java.lang.AssertionError
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.helpTestWriteFailureDisablesIndex(MutableIndexFailureIT.java:164)
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.testWriteFailureDisablesIndex(MutableIndexFailureIT.java:127)

Running org.apache.phoenix.hbase.index.covered.example.EndToEndCoveredIndexingIT
Running org.apache.phoenix.hbase.index.covered.example.FailWithoutRetriesIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 15.544 sec - in org.apache.phoenix.hbase.index.covered.example.FailWithoutRetriesIT
Running org.apache.phoenix.hbase.index.covered.example.EndtoEndIndexingWithCompressionIT
Running org.apache.phoenix.iterate.RoundRobinResultIteratorWithStatsIT
Running org.apache.phoenix.iterate.ScannerLeaseRenewalIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.985 sec - in org.apache.phoenix.iterate.RoundRobinResultIteratorWithStatsIT
Running org.apache.phoenix.monitoring.PhoenixMetricsIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 235.813 sec - in org.apache.phoenix.hbase.index.covered.example.EndToEndCoveredIndexingIT
Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 114.156 sec - in org.apache.phoenix.monitoring.PhoenixMetricsIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 178.338 sec - in org.apache.phoenix.iterate.ScannerLeaseRenewalIT
Tests run: 16, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1,645.195 sec - in org.apache.phoenix.end2end.IndexToolIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 244.931 sec - in org.apache.phoenix.hbase.index.covered.example.EndtoEndIndexingWithCompressionIT
Running org.apache.phoenix.rpc.PhoenixClientRpcIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 18.097 sec - in org.apache.phoenix.rpc.PhoenixClientRpcIT
Running org.apache.phoenix.rpc.PhoenixServerRpcIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 18.499 sec - in org.apache.phoenix.rpc.PhoenixServerRpcIT

Results :

Failed tests: 
  MutableIndexFailureIT.testWriteFailureDisablesIndex:127->helpTestWriteFailureDisablesIndex:164
Tests in error: 
  MutableIndexFailureIT.testWriteFailureDisablesIndex:127->helpTestWriteFailureDisablesIndex:159 » TableAlreadyExists
org.apache.phoenix.end2end.index.MutableIndexFailureIT.testWriteFailureDisablesIndex[transactional = false, localIndex = false, isNamespaceMapped = true](org.apache.phoenix.end2end.index.MutableIndexFailureIT)
  Run 1: MutableIndexFailureIT.testWriteFailureDisablesIndex:127->helpTestWriteFailureDisablesIndex:225 » Commit
  Run 2: MutableIndexFailureIT>BaseOwnClusterHBaseManagedTimeIT.cleanUpAfterTest:27->BaseTest.deletePriorMetaData:932->BaseTest.deletePriorTables:940->BaseTest.deletePriorTables:951->BaseTest.deletePriorTables:996 » PhoenixIO


Tests run: 344, Failures: 1, Errors: 2, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:verify (ClientManagedTimeTests) @ phoenix-core ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Phoenix ..................................... SUCCESS [  3.389 s]
[INFO] Phoenix Core ....................................... FAILURE [  01:49 h]
[INFO] Phoenix - Flume .................................... SKIPPED
[INFO] Phoenix - Pig ...................................... SKIPPED
[INFO] Phoenix Query Server Client ........................ SKIPPED
[INFO] Phoenix Query Server ............................... SKIPPED
[INFO] Phoenix - Pherf .................................... SKIPPED
[INFO] Phoenix - Spark .................................... SKIPPED
[INFO] Phoenix - Hive ..................................... SKIPPED
[INFO] Phoenix Client ..................................... SKIPPED
[INFO] Phoenix Server ..................................... SKIPPED
[INFO] Phoenix Assembly ................................... SKIPPED
[INFO] Phoenix - Tracing Web Application .................. SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:49 h
[INFO] Finished at: 2016-12-25T06:54:32+00:00
[INFO] Final Memory: 60M/1150M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-failsafe-plugin:2.19.1:verify (ClientManagedTimeTests) on project phoenix-core: There are test failures.
[ERROR] 
[ERROR] Please refer to <https://builds.apache.org/job/Phoenix-4.8-HBase-1.1/ws/phoenix-core/target/failsafe-reports> for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :phoenix-core
Build step 'Invoke top-level Maven targets' marked build as failure
Archiving artifacts
Compressed 2.13 GB of artifacts by 69.6% relative to #57
Recording test results