See <https://builds.apache.org/job/Phoenix-master/1238/changes>
Changes:
[elserj] [PHOENIX-2955] replace xpath to xmllint at make_rc.sh fix problem:
------------------------------------------
[...truncated 27244 lines...]
at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.RuntimeException: java.lang.RuntimeException: java.lang.OutOfMemoryError: unable to create new native thread
at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:208)
at org.apache.hadoop.hbase.client.ClientScanner.call(ClientScanner.java:320)
at org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:295)
at org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:160)
at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:155)
at org.apache.hadoop.hbase.client.HTable.getScanner(HTable.java:802)
at org.apache.hadoop.hbase.client.HTableWrapper.getScanner(HTableWrapper.java:215)
at org.apache.phoenix.schema.stats.StatisticsUtil.readStatistics(StatisticsUtil.java:139)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.getTable(MetaDataEndpointImpl.java:964)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.buildTable(MetaDataEndpointImpl.java:542)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.doGetTable(MetaDataEndpointImpl.java:2896)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.doGetTable(MetaDataEndpointImpl.java:2841)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.getTable(MetaDataEndpointImpl.java:492)
... 10 more
Caused by: java.lang.RuntimeException: java.lang.OutOfMemoryError: unable to create new native thread
at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:208)
at org.apache.hadoop.hbase.client.ClientSmallReversedScanner.loadCache(ClientSmallReversedScanner.java:212)
at org.apache.hadoop.hbase.client.ClientSmallReversedScanner.next(ClientSmallReversedScanner.java:186)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegionInMeta(ConnectionManager.java:1279)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1185)
at org.apache.hadoop.hbase.client.CoprocessorHConnection.locateRegion(CoprocessorHConnection.java:41)
at org.apache.hadoop.hbase.client.RpcRetryingCallerWithReadReplicas.getRegionLocations(RpcRetryingCallerWithReadReplicas.java:305)
at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:156)
at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:60)
at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:200)
... 22 more
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
at java.lang.Thread.start0(Native Method)
at java.lang.Thread.start(Thread.java:714)
at java.util.concurrent.ThreadPoolExecutor.addWorker(ThreadPoolExecutor.java:949)
at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360)
at org.apache.hadoop.hbase.client.ResultBoundedCompletionService.submit(ResultBoundedCompletionService.java:146)
at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.addCallsForCurrentReplica(ScannerCallableWithReplicas.java:283)
at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:170)
at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:60)
at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:200)
... 31 more
testWriteFailureReadOnlyIndex[localIndex = false](org.apache.phoenix.end2end.index.ReadOnlyIndexFailureIT)  Time elapsed: 1.69 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException: T: unable to create new native thread
at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:87)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1697)
at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16108)
at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:7761)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:1988)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:1970)
at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33652)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2170)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:109)
at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
at java.lang.Thread.start0(Native Method)
at java.lang.Thread.start(Thread.java:714)
at org.apache.zookeeper.ClientCnxn.start(ClientCnxn.java:405)
at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:450)
at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:380)
at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.checkZk(RecoverableZooKeeper.java:141)
at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.<init>(RecoverableZooKeeper.java:128)
at org.apache.hadoop.hbase.zookeeper.ZKUtil.connect(ZKUtil.java:137)
at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.<init>(ZooKeeperWatcher.java:185)
at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.<init>(ZooKeeperWatcher.java:153)
at org.apache.hadoop.hbase.client.ZooKeeperKeepAliveConnection.<init>(ZooKeeperKeepAliveConnection.java:43)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.getKeepAliveZooKeeperWatcher(ConnectionManager.java:1693)
at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getClusterId(ZooKeeperRegistry.java:104)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.retrieveClusterId(ConnectionManager.java:909)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.<init>(ConnectionManager.java:652)
at org.apache.hadoop.hbase.client.CoprocessorHConnection.<init>(CoprocessorHConnection.java:99)
at org.apache.hadoop.hbase.client.CoprocessorHConnection.<init>(CoprocessorHConnection.java:89)
at org.apache.hadoop.hbase.client.CoprocessorHConnection.getConnectionForEnvironment(CoprocessorHConnection.java:61)
at org.apache.hadoop.hbase.client.HTableWrapper.createWrapper(HTableWrapper.java:68)
at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.getTable(CoprocessorHost.java:514)
at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.getTable(CoprocessorHost.java:503)
at org.apache.phoenix.util.ServerUtil.getHTableForCoprocessorScan(ServerUtil.java:170)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.getTable(MetaDataEndpointImpl.java:961)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.buildTable(MetaDataEndpointImpl.java:542)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.doDropTable(MetaDataEndpointImpl.java:1717)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1670)
... 10 more
Caused by: org.apache.hadoop.hbase.DoNotRetryIOException: org.apache.hadoop.hbase.DoNotRetryIOException: T: unable to create new native thread
at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:87)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1697)
at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16108)
at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:7761)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:1988)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:1970)
at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33652)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2170)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:109)
at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
at java.lang.Thread.start0(Native Method)
at java.lang.Thread.start(Thread.java:714)
at org.apache.zookeeper.ClientCnxn.start(ClientCnxn.java:405)
at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:450)
at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:380)
at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.checkZk(RecoverableZooKeeper.java:141)
at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.<init>(RecoverableZooKeeper.java:128)
at org.apache.hadoop.hbase.zookeeper.ZKUtil.connect(ZKUtil.java:137)
at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.<init>(ZooKeeperWatcher.java:185)
at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.<init>(ZooKeeperWatcher.java:153)
at org.apache.hadoop.hbase.client.ZooKeeperKeepAliveConnection.<init>(ZooKeeperKeepAliveConnection.java:43)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.getKeepAliveZooKeeperWatcher(ConnectionManager.java:1693)
at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getClusterId(ZooKeeperRegistry.java:104)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.retrieveClusterId(ConnectionManager.java:909)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.<init>(ConnectionManager.java:652)
at org.apache.hadoop.hbase.client.CoprocessorHConnection.<init>(CoprocessorHConnection.java:99)
at org.apache.hadoop.hbase.client.CoprocessorHConnection.<init>(CoprocessorHConnection.java:89)
at org.apache.hadoop.hbase.client.CoprocessorHConnection.getConnectionForEnvironment(CoprocessorHConnection.java:61)
at org.apache.hadoop.hbase.client.HTableWrapper.createWrapper(HTableWrapper.java:68)
at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.getTable(CoprocessorHost.java:514)
at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.getTable(CoprocessorHost.java:503)
at org.apache.phoenix.util.ServerUtil.getHTableForCoprocessorScan(ServerUtil.java:170)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.getTable(MetaDataEndpointImpl.java:961)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.buildTable(MetaDataEndpointImpl.java:542)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.doDropTable(MetaDataEndpointImpl.java:1717)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1670)
... 10 more
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.DoNotRetryIOException: T: unable to create new native thread
at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:87)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1697)
at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16108)
at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:7761)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:1988)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:1970)
at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33652)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2170)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:109)
at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
at java.lang.Thread.start0(Native Method)
at java.lang.Thread.start(Thread.java:714)
at org.apache.zookeeper.ClientCnxn.start(ClientCnxn.java:405)
at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:450)
at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:380)
at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.checkZk(RecoverableZooKeeper.java:141)
at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.<init>(RecoverableZooKeeper.java:128)
at org.apache.hadoop.hbase.zookeeper.ZKUtil.connect(ZKUtil.java:137)
at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.<init>(ZooKeeperWatcher.java:185)
at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.<init>(ZooKeeperWatcher.java:153)
at org.apache.hadoop.hbase.client.ZooKeeperKeepAliveConnection.<init>(ZooKeeperKeepAliveConnection.java:43)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.getKeepAliveZooKeeperWatcher(ConnectionManager.java:1693)
at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getClusterId(ZooKeeperRegistry.java:104)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.retrieveClusterId(ConnectionManager.java:909)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.<init>(ConnectionManager.java:652)
at org.apache.hadoop.hbase.client.CoprocessorHConnection.<init>(CoprocessorHConnection.java:99)
at org.apache.hadoop.hbase.client.CoprocessorHConnection.<init>(CoprocessorHConnection.java:89)
at org.apache.hadoop.hbase.client.CoprocessorHConnection.getConnectionForEnvironment(CoprocessorHConnection.java:61)
at org.apache.hadoop.hbase.client.HTableWrapper.createWrapper(HTableWrapper.java:68)
at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.getTable(CoprocessorHost.java:514)
at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.getTable(CoprocessorHost.java:503)
at org.apache.phoenix.util.ServerUtil.getHTableForCoprocessorScan(ServerUtil.java:170)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.getTable(MetaDataEndpointImpl.java:961)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.buildTable(MetaDataEndpointImpl.java:542)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.doDropTable(MetaDataEndpointImpl.java:1717)
at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1670)
... 10 more
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 31.339 sec - in org.apache.phoenix.end2end.index.txn.TxWriteFailureIT
Running org.apache.phoenix.hbase.index.FailForUnsupportedHBaseVersionsIT
Tests run: 48, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 180.84 sec - in org.apache.phoenix.end2end.ViewIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 18.469 sec - in org.apache.phoenix.hbase.index.FailForUnsupportedHBaseVersionsIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 124.768 sec - in org.apache.phoenix.end2end.index.MutableIndexFailureIT
Running org.apache.phoenix.execute.PartialCommitIT
Running org.apache.phoenix.hbase.index.covered.EndToEndCoveredColumnsIndexBuilderIT
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.412 sec - in org.apache.phoenix.execute.PartialCommitIT
Running org.apache.phoenix.hbase.index.covered.example.FailWithoutRetriesIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 18.122 sec - in org.apache.phoenix.hbase.index.covered.EndToEndCoveredColumnsIndexBuilderIT
Running org.apache.phoenix.hbase.index.covered.example.EndtoEndIndexingWithCompressionIT
Running org.apache.phoenix.hbase.index.covered.example.EndToEndCoveredIndexingIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.115 sec - in org.apache.phoenix.hbase.index.covered.example.FailWithoutRetriesIT
Running org.apache.phoenix.iterate.RoundRobinResultIteratorWithStatsIT
Running org.apache.phoenix.iterate.ScannerLeaseRenewalIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 12.898 sec - in org.apache.phoenix.iterate.RoundRobinResultIteratorWithStatsIT
Running org.apache.phoenix.monitoring.PhoenixMetricsIT
Tests run: 16, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 801.116 sec - in org.apache.phoenix.end2end.IndexToolIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 131.402 sec - in org.apache.phoenix.hbase.index.covered.example.EndToEndCoveredIndexingIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 132.519 sec - in org.apache.phoenix.hbase.index.covered.example.EndtoEndIndexingWithCompressionIT
Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 67.436 sec - in org.apache.phoenix.monitoring.PhoenixMetricsIT
Running org.apache.phoenix.rpc.PhoenixClientRpcIT
Running org.apache.phoenix.rpc.PhoenixServerRpcIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.857 sec - in org.apache.phoenix.rpc.PhoenixClientRpcIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.964 sec - in org.apache.phoenix.rpc.PhoenixServerRpcIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 170.715 sec - in org.apache.phoenix.iterate.ScannerLeaseRenewalIT
Results :
Tests in error:
org.apache.phoenix.end2end.index.ReadOnlyIndexFailureIT.testWriteFailureReadOnlyIndex[localIndex = false](org.apache.phoenix.end2end.index.ReadOnlyIndexFailureIT)
Run 1: ReadOnlyIndexFailureIT.testWriteFailureReadOnlyIndex:125->helpTestWriteFailureReadOnlyIndex:137 » PhoenixIO
Run 2: ReadOnlyIndexFailureIT>BaseOwnClusterHBaseManagedTimeIT.cleanUpAfterTest:29->BaseTest.deletePriorMetaData:932->BaseTest.deletePriorTables:940->BaseTest.deletePriorTables:951->BaseTest.deletePriorTables:996 » PhoenixIO
Tests run: 338, Failures: 0, Errors: 1, Skipped: 0
[INFO]
[INFO] --- maven-failsafe-plugin:2.19.1:verify (ClientManagedTimeTests) @ phoenix-core ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO]
[INFO] Apache Phoenix .................................... SUCCESS [3.165s]
[INFO] Phoenix Core ...................................... FAILURE [53:41.481s]
[INFO] Phoenix - Flume ................................... SKIPPED
[INFO] Phoenix - Pig ..................................... SKIPPED
[INFO] Phoenix Query Server Client ....................... SKIPPED
[INFO] Phoenix Query Server .............................. SKIPPED
[INFO] Phoenix - Pherf ................................... SKIPPED
[INFO] Phoenix - Spark ................................... SKIPPED
[INFO] Phoenix - Hive .................................... SKIPPED
[INFO] Phoenix Assembly .................................. SKIPPED
[INFO] Phoenix - Tracing Web Application ................. SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 53:45.350s
[INFO] Finished at: Wed Jun 01 18:47:10 UTC 2016
[INFO] Final Memory: 49M/1084M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-failsafe-plugin:2.19.1:verify (ClientManagedTimeTests) on project phoenix-core: There was a timeout or other error in the fork -> [Help 1]
[ERROR]
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR]
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoExecutionException
[ERROR]
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR] mvn <goals> -rf :phoenix-core
Build step 'Invoke top-level Maven targets' marked build as failure
Archiving artifacts
Compressed 1.83 GB of artifacts by 39.5% relative to #1237
Updating PHOENIX-2955
Recording test results