See <https://builds.apache.org/job/Phoenix-4.x-HBase-1.2/570/display/redirect?page=changes>

Changes:

[vincentpoon] PHOENIX-4781 Create artifact jar so that shaded jar replaces it properly

------------------------------------------
[...truncated 200.89 KB...]
        at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:162)
        at org.apache.hadoop.hbase.client.HTable.getScanner(HTable.java:797)
        at org.apache.hadoop.hbase.client.HTableWrapper.getScanner(HTableWrapper.java:215)
        at org.apache.phoenix.coprocessor.ViewFinder.findRelatedViews(ViewFinder.java:93)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropChildViews(MetaDataEndpointImpl.java:2431)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.createTable(MetaDataEndpointImpl.java:2046)
        ... 10 more
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
        at java.lang.Thread.start0(Native Method)
        at java.lang.Thread.start(Thread.java:717)
        at java.util.concurrent.ThreadPoolExecutor.addWorker(ThreadPoolExecutor.java:957)
        at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1367)
        at org.apache.hadoop.hbase.client.ResultBoundedCompletionService.submit(ResultBoundedCompletionService.java:146)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.addCallsForCurrentReplica(ScannerCallableWithReplicas.java:287)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:170)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:60)
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:210)
        ... 19 more

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.DoNotRetryIOException: SCHEMA4.N000023: java.lang.OutOfMemoryError: unable to create new native thread
        at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:120)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.createTable(MetaDataEndpointImpl.java:2420)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:17053)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:7873)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2008)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:1990)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33652)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2188)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:112)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.RuntimeException: java.lang.OutOfMemoryError: unable to create new native thread
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:218)
        at org.apache.hadoop.hbase.client.ClientScanner.call(ClientScanner.java:327)
        at org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:302)
        at org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:167)
        at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:162)
        at org.apache.hadoop.hbase.client.HTable.getScanner(HTable.java:797)
        at org.apache.hadoop.hbase.client.HTableWrapper.getScanner(HTableWrapper.java:215)
        at org.apache.phoenix.coprocessor.ViewFinder.findRelatedViews(ViewFinder.java:93)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropChildViews(MetaDataEndpointImpl.java:2431)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.createTable(MetaDataEndpointImpl.java:2046)
        ... 10 more
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
        at java.lang.Thread.start0(Native Method)
        at java.lang.Thread.start(Thread.java:717)
        at java.util.concurrent.ThreadPoolExecutor.addWorker(ThreadPoolExecutor.java:957)
        at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1367)
        at org.apache.hadoop.hbase.client.ResultBoundedCompletionService.submit(ResultBoundedCompletionService.java:146)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.addCallsForCurrentReplica(ScannerCallableWithReplicas.java:287)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:170)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:60)
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:210)
        ... 19 more
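
For anyone triaging this failure: "java.lang.OutOfMemoryError: unable to create new native thread" is thrown when Thread.start0 asks the OS for a new thread and is refused (per-user process/thread limit or exhausted native stack space), and it can occur with plenty of free heap. The standalone sketch below is illustrative only, not part of this build; it reproduces that class of error by starting parked threads until the OS refuses:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;

// Illustrative only: starts parked threads until the OS refuses to create
// more, producing the same "unable to create new native thread" error seen
// in the traces above (thrown from Thread.start0).
public class NativeThreadExhaustion {
    public static void main(String[] args) {
        CountDownLatch parked = new CountDownLatch(1); // keeps every thread alive
        List<Thread> started = new ArrayList<>();
        try {
            while (true) {
                Thread t = new Thread(() -> {
                    try { parked.await(); } catch (InterruptedException ignored) { }
                });
                t.start(); // fails with OutOfMemoryError once the OS limit is hit
                started.add(t);
            }
        } catch (OutOfMemoryError e) {
            System.err.println("OS refused after " + started.size() + " threads: " + e);
            parked.countDown(); // release the parked threads so the JVM can exit
        }
    }
}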


[ERROR] testViewAndTableAndDropCascadeWithIndexes[ViewIT_transactionProvider=TEPHRA, columnEncoded=false](org.apache.phoenix.end2end.ViewIT)  Time elapsed: 4.377 s  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException: SCHEMA1.N000024: unable to create new native thread
        at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:120)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.createTable(MetaDataEndpointImpl.java:2420)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:17053)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:7873)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2008)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:1990)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33652)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2188)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:112)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
        at java.lang.Thread.start0(Native Method)
        at java.lang.Thread.start(Thread.java:717)
        at org.apache.zookeeper.ClientCnxn.start(ClientCnxn.java:405)
        at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:450)
        at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:380)
        at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.checkZk(RecoverableZooKeeper.java:141)
        at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.<init>(RecoverableZooKeeper.java:128)
        at org.apache.hadoop.hbase.zookeeper.ZKUtil.connect(ZKUtil.java:137)
        at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.<init>(ZooKeeperWatcher.java:185)
        at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.<init>(ZooKeeperWatcher.java:153)
        at org.apache.hadoop.hbase.client.ZooKeeperKeepAliveConnection.<init>(ZooKeeperKeepAliveConnection.java:43)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.getKeepAliveZooKeeperWatcher(ConnectionManager.java:1690)
        at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getClusterId(ZooKeeperRegistry.java:104)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.retrieveClusterId(ConnectionManager.java:905)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.<init>(ConnectionManager.java:648)
        at org.apache.hadoop.hbase.client.CoprocessorHConnection.<init>(CoprocessorHConnection.java:99)
        at org.apache.hadoop.hbase.client.CoprocessorHConnection.<init>(CoprocessorHConnection.java:89)
        at org.apache.hadoop.hbase.client.CoprocessorHConnection.getConnectionForEnvironment(CoprocessorHConnection.java:61)
        at org.apache.hadoop.hbase.client.HTableWrapper.createWrapper(HTableWrapper.java:68)
        at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.getTable(CoprocessorHost.java:514)
        at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.getTable(CoprocessorHost.java:503)
        at org.apache.phoenix.util.ServerUtil.getHTableForCoprocessorScan(ServerUtil.java:230)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropChildViews(MetaDataEndpointImpl.java:2427)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.createTable(MetaDataEndpointImpl.java:2046)
        ... 10 more

        at org.apache.phoenix.end2end.ViewIT.testViewAndTableAndDropCascadeWithIndexes(ViewIT.java:620)
Caused by: org.apache.hadoop.hbase.DoNotRetryIOException: org.apache.hadoop.hbase.DoNotRetryIOException: SCHEMA1.N000024: unable to create new native thread
        at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:120)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.createTable(MetaDataEndpointImpl.java:2420)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:17053)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:7873)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2008)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:1990)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33652)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2188)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:112)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
        at java.lang.Thread.start0(Native Method)
        at java.lang.Thread.start(Thread.java:717)
        at org.apache.zookeeper.ClientCnxn.start(ClientCnxn.java:405)
        at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:450)
        at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:380)
        at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.checkZk(RecoverableZooKeeper.java:141)
        at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.<init>(RecoverableZooKeeper.java:128)
        at org.apache.hadoop.hbase.zookeeper.ZKUtil.connect(ZKUtil.java:137)
        at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.<init>(ZooKeeperWatcher.java:185)
        at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.<init>(ZooKeeperWatcher.java:153)
        at org.apache.hadoop.hbase.client.ZooKeeperKeepAliveConnection.<init>(ZooKeeperKeepAliveConnection.java:43)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.getKeepAliveZooKeeperWatcher(ConnectionManager.java:1690)
        at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getClusterId(ZooKeeperRegistry.java:104)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.retrieveClusterId(ConnectionManager.java:905)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.<init>(ConnectionManager.java:648)
        at org.apache.hadoop.hbase.client.CoprocessorHConnection.<init>(CoprocessorHConnection.java:99)
        at org.apache.hadoop.hbase.client.CoprocessorHConnection.<init>(CoprocessorHConnection.java:89)
        at org.apache.hadoop.hbase.client.CoprocessorHConnection.getConnectionForEnvironment(CoprocessorHConnection.java:61)
        at org.apache.hadoop.hbase.client.HTableWrapper.createWrapper(HTableWrapper.java:68)
        at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.getTable(CoprocessorHost.java:514)
        at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.getTable(CoprocessorHost.java:503)
        at org.apache.phoenix.util.ServerUtil.getHTableForCoprocessorScan(ServerUtil.java:230)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropChildViews(MetaDataEndpointImpl.java:2427)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.createTable(MetaDataEndpointImpl.java:2046)
        ... 10 more

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.DoNotRetryIOException: SCHEMA1.N000024: unable to create new native thread
        at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:120)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.createTable(MetaDataEndpointImpl.java:2420)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:17053)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:7873)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2008)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:1990)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33652)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2188)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:112)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
        at java.lang.Thread.start0(Native Method)
        at java.lang.Thread.start(Thread.java:717)
        at org.apache.zookeeper.ClientCnxn.start(ClientCnxn.java:405)
        at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:450)
        at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:380)
        at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.checkZk(RecoverableZooKeeper.java:141)
        at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.<init>(RecoverableZooKeeper.java:128)
        at org.apache.hadoop.hbase.zookeeper.ZKUtil.connect(ZKUtil.java:137)
        at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.<init>(ZooKeeperWatcher.java:185)
        at org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.<init>(ZooKeeperWatcher.java:153)
        at org.apache.hadoop.hbase.client.ZooKeeperKeepAliveConnection.<init>(ZooKeeperKeepAliveConnection.java:43)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.getKeepAliveZooKeeperWatcher(ConnectionManager.java:1690)
        at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getClusterId(ZooKeeperRegistry.java:104)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.retrieveClusterId(ConnectionManager.java:905)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.<init>(ConnectionManager.java:648)
        at org.apache.hadoop.hbase.client.CoprocessorHConnection.<init>(CoprocessorHConnection.java:99)
        at org.apache.hadoop.hbase.client.CoprocessorHConnection.<init>(CoprocessorHConnection.java:89)
        at org.apache.hadoop.hbase.client.CoprocessorHConnection.getConnectionForEnvironment(CoprocessorHConnection.java:61)
        at org.apache.hadoop.hbase.client.HTableWrapper.createWrapper(HTableWrapper.java:68)
        at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.getTable(CoprocessorHost.java:514)
        at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.getTable(CoprocessorHost.java:503)
        at org.apache.phoenix.util.ServerUtil.getHTableForCoprocessorScan(ServerUtil.java:230)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropChildViews(MetaDataEndpointImpl.java:2427)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.createTable(MetaDataEndpointImpl.java:2046)
        ... 10 more
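
Unlike the first trace, the failing thread here is a ZooKeeper client thread: the frames suggest MetaDataEndpointImpl.dropChildViews obtains its table through a fresh CoprocessorHConnection, and each new connection starts its own ZooKeeper threads. A small probe like the one below (a hypothetical diagnostic, not part of this build) could be run inside the test JVM to confirm that the live thread count climbs toward the OS limit:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

// Hypothetical diagnostic: periodically logs JVM thread statistics so that
// steady growth (e.g. new ZooKeeper client threads per connection, as in the
// trace above) becomes visible before the OS limit is reached.
public class ThreadCountProbe {
    public static void main(String[] args) throws InterruptedException {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        while (true) {
            System.out.printf("live=%d peak=%d totalStarted=%d%n",
                    threads.getThreadCount(),              // currently live threads
                    threads.getPeakThreadCount(),          // high-water mark
                    threads.getTotalStartedThreadCount()); // cumulative count
            Thread.sleep(5_000); // sample every five seconds
        }
    }
}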


[INFO] Tests run: 104, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1,399.376 s - in org.apache.phoenix.end2end.AlterTableWithViewsIT
[INFO] 
[INFO] Results:
[INFO] 
[ERROR] Errors: 
[ERROR]   AlterMultiTenantTableWithViewsIT.testAddColumnsToSaltedBaseTableWithViews:506 » PhoenixIO
[ERROR]   AlterMultiTenantTableWithViewsIT.testAddDropColumnToBaseTablePropagatesToEntireViewHierarchy:102 » PhoenixIO
[ERROR]   AlterMultiTenantTableWithViewsIT.testAddingPkAndKeyValueColumnsToBaseTableWithDivergedView:456 » PhoenixIO
[ERROR]   AlterMultiTenantTableWithViewsIT.testCacheInvalidatedAfterDroppingColumnFromBaseTableWithViews:640 » PhoenixIO
[ERROR]   AlterMultiTenantTableWithViewsIT.testChangingPKOfBaseTableChangesPKForAllViews:220 » PhoenixIO
[ERROR]   DropTableWithViewsIT.testDropTableWithChildViews:105 » PhoenixIO org.apache.ha...
[ERROR]   DropTableWithViewsIT.testDropTableWithChildViews:95 » PhoenixIO java.util.conc...
[ERROR]   TenantSpecificViewIndexIT.testMultiCFViewIndexWithNamespaceMapping:86->testMultiCFViewIndex:127->createViewAndIndexesWithTenantId:200 » PhoenixIO
[ERROR]   TenantSpecificViewIndexIT.testMultiCFViewLocalIndex:92->testMultiCFViewIndex:139 » PhoenixIO
[ERROR]   ViewIT.testConcurrentAddDifferentColumn:1487 » PhoenixIO org.apache.hadoop.hba...
[ERROR]   ViewIT.testViewAndTableAndDropCascadeWithIndexes:620 » PhoenixIO org.apache.ha...
[ERROR]   ViewIndexIT.testDeleteViewIndexSequences:129 » PhoenixIO org.apache.hadoop.hba...
[INFO] 
[ERROR] Tests run: 273, Failures: 0, Errors: 12, Skipped: 2
[INFO] 
[INFO] 
[INFO] --- maven-failsafe-plugin:2.20:verify (ParallelStatsEnabledTest) @ phoenix-core ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Phoenix 4.15.0-HBase-1.2-SNAPSHOT ........... SUCCESS [  4.172 s]
[INFO] Phoenix Core ....................................... FAILURE [  02:53 h]
[INFO] Phoenix - Flume .................................... SKIPPED
[INFO] Phoenix - Kafka .................................... SKIPPED
[INFO] Phoenix - Pig ...................................... SKIPPED
[INFO] Phoenix Query Server Client ........................ SKIPPED
[INFO] Phoenix Query Server ............................... SKIPPED
[INFO] Phoenix - Pherf .................................... SKIPPED
[INFO] Phoenix - Spark .................................... SKIPPED
[INFO] Phoenix - Hive ..................................... SKIPPED
[INFO] Phoenix Client ..................................... SKIPPED
[INFO] Phoenix Server ..................................... SKIPPED
[INFO] Phoenix Load Balancer .............................. SKIPPED
[INFO] Phoenix Assembly ................................... SKIPPED
[INFO] Phoenix - Tracing Web Application 4.15.0-HBase-1.2-SNAPSHOT SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:53 h
[INFO] Finished at: 2018-12-04T22:46:34Z
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-failsafe-plugin:2.20:verify (ParallelStatsEnabledTest) on project phoenix-core: There are test failures.
[ERROR] 
[ERROR] Please refer to <https://builds.apache.org/job/Phoenix-4.x-HBase-1.2/ws/phoenix-core/target/failsafe-reports> for the individual test results.
[ERROR] Please refer to dump files (if any exist) [date]-jvmRun[N].dump, [date].dumpstream and [date]-jvmRun[N].dumpstream.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :phoenix-core
Build step 'Invoke top-level Maven targets' marked build as failure
Archiving artifacts
Recording test results
Not sending mail to unregistered user k.me...@salesforce.com
Not sending mail to unregistered user ankitsingha...@gmail.com
