See <https://builds.apache.org/job/Phoenix-4.x-HBase-0.98/1367/changes>

Changes:

[jamestaylor] PHOENIX-3457 Disable parallel run of tests and increase memory
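For reference, the PHOENIX-3457 commit's exact values aren't reproduced in this log; a change like "disable parallel run of tests and increase memory" is typically expressed in phoenix-core's pom.xml via standard maven-failsafe-plugin parameters along these lines (a minimal sketch; the forkCount and heap values below are illustrative assumptions, not the actual commit):

  <!-- Hypothetical pom.xml fragment; forkCount/reuseForks/argLine are real
       maven-failsafe-plugin parameters, but the values are assumptions. -->
  <plugin>
    <groupId>org.apache.maven.plugins</groupId>
    <artifactId>maven-failsafe-plugin</artifactId>
    <configuration>
      <!-- Run integration tests serially: one forked JVM at a time. -->
      <forkCount>1</forkCount>
      <reuseForks>false</reuseForks>
      <!-- Larger heap for the forked test JVM (assumed value). -->
      <argLine>-Xmx3000m -XX:MaxPermSize=256m</argLine>
    </configuration>
  </plugin>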

------------------------------------------
[...truncated 1077 lines...]
java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE6
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)


testImportOneLocalIndexTable(org.apache.phoenix.end2end.CsvBulkLoadToolIT)  Time elapsed: 2,406.615 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: callTimeout=1200000, callDuration=1202387: row '  TABLE5_IDX' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478344710729.3876bc396596d88a29737624dc0e99a2., hostname=penates.apache.org,34160,1478344703990, seqNum=1
        at org.apache.phoenix.end2end.CsvBulkLoadToolIT.testImportOneIndexTable(CsvBulkLoadToolIT.java:309)
        at org.apache.phoenix.end2end.CsvBulkLoadToolIT.testImportOneLocalIndexTable(CsvBulkLoadToolIT.java:297)
Caused by: java.net.SocketTimeoutException: callTimeout=1200000, callDuration=1202387: row '  TABLE5_IDX' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478344710729.3876bc396596d88a29737624dc0e99a2., hostname=penates.apache.org,34160,1478344703990, seqNum=1
Caused by: java.net.SocketTimeoutException: Call to penates.apache.org/67.195.81.186:34160 failed because java.net.SocketTimeoutException: 1200000 millis timeout while waiting for channel to be ready for read. ch : java.nio.channels.SocketChannel[connected local=/67.195.81.186:38820 remote=penates.apache.org/67.195.81.186:34160]
Caused by: java.net.SocketTimeoutException: 1200000 millis timeout while waiting for channel to be ready for read. ch : java.nio.channels.SocketChannel[connected local=/67.195.81.186:38820 remote=penates.apache.org/67.195.81.186:34160]

testImportOneLocalIndexTable(org.apache.phoenix.end2end.CsvBulkLoadToolIT)  Time elapsed: 2,406.616 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: callTimeout=1200000, callDuration=1222066: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478344710729.3876bc396596d88a29737624dc0e99a2., hostname=penates.apache.org,34160,1478344703990, seqNum=1
Caused by: java.net.SocketTimeoutException: callTimeout=1200000, callDuration=1222066: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478344710729.3876bc396596d88a29737624dc0e99a2., hostname=penates.apache.org,34160,1478344703990, seqNum=1
Caused by: java.io.IOException: 
java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: 
java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)


testInvalidArguments(org.apache.phoenix.end2end.CsvBulkLoadToolIT)  Time elapsed: 1,200.221 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: callTimeout=1200000, callDuration=1222279: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478344710729.3876bc396596d88a29737624dc0e99a2., hostname=penates.apache.org,34160,1478344703990, seqNum=1
Caused by: java.net.SocketTimeoutException: callTimeout=1200000, callDuration=1222279: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478344710729.3876bc396596d88a29737624dc0e99a2., hostname=penates.apache.org,34160,1478344703990, seqNum=1
Caused by: java.io.IOException: 
java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: 
java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)


testImportWithIndex(org.apache.phoenix.end2end.CsvBulkLoadToolIT)  Time elapsed: 1,205.077 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: callTimeout=1200000, callDuration=1221609: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478344710729.3876bc396596d88a29737624dc0e99a2., hostname=penates.apache.org,34160,1478344703990, seqNum=1
Caused by: java.net.SocketTimeoutException: callTimeout=1200000, callDuration=1221609: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478344710729.3876bc396596d88a29737624dc0e99a2., hostname=penates.apache.org,34160,1478344703990, seqNum=1
Caused by: java.io.IOException: 
java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: 
java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)


testImportOneIndexTable(org.apache.phoenix.end2end.CsvBulkLoadToolIT)  Time elapsed: 1,204.958 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: callTimeout=1200000, callDuration=1221770: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478344710729.3876bc396596d88a29737624dc0e99a2., hostname=penates.apache.org,34160,1478344703990, seqNum=1
Caused by: java.net.SocketTimeoutException: callTimeout=1200000, callDuration=1221770: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478344710729.3876bc396596d88a29737624dc0e99a2., hostname=penates.apache.org,34160,1478344703990, seqNum=1
Caused by: java.io.IOException: 
java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: 
java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)



Results :

Tests in error: 
  CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO
  CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO
  CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO
org.apache.phoenix.end2end.CsvBulkLoadToolIT.testImportOneLocalIndexTable(org.apache.phoenix.end2end.CsvBulkLoadToolIT)
  Run 1: CsvBulkLoadToolIT.testImportOneLocalIndexTable:297->testImportOneIndexTable:309 » PhoenixIO
  Run 2: CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO

  CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO
org.apache.phoenix.end2end.CsvBulkLoadToolIT.testImportWithLocalIndex(org.apache.phoenix.end2end.CsvBulkLoadToolIT)
  Run 1: CsvBulkLoadToolIT.testImportWithLocalIndex:258 » PhoenixIO callTimeout=1200000...
  Run 2: CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO

  CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO
  CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO

Tests run: 211, Failures: 0, Errors: 8, Skipped: 33
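Every error above is the same 20-minute client limit (callTimeout=1200000) expiring against SYSTEM.CATALOG while the region server holds a row lock during dropTable. These limits come from ordinary HBase/Phoenix client settings; the keys below are standard, but the values this build actually ran with are not shown in the log, so treat this hbase-site.xml fragment as an assumed sketch:

  <!-- Hypothetical hbase-site.xml fragment on the test classpath; keys are
       real HBase/Phoenix settings, values assumed to match the log. -->
  <configuration>
    <property>
      <!-- Per-RPC timeout; 1200000 ms = 20 min, the callTimeout above. -->
      <name>hbase.rpc.timeout</name>
      <value>1200000</value>
    </property>
    <property>
      <!-- Phoenix's overall query timeout. -->
      <name>phoenix.query.timeoutMs</name>
      <value>1200000</value>
    </property>
    <property>
      <!-- How long a region server waits before throwing "Timed out
           waiting for lock for row"; 30000 ms is the HBase default. -->
      <name>hbase.rowlock.wait.duration</name>
      <value>30000</value>
    </property>
  </configuration>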

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:verify (ParallelStatsEnabledTest) @ phoenix-core ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Phoenix ..................................... SUCCESS [  4.359 s]
[INFO] Phoenix Core ....................................... FAILURE [  03:58 h]
[INFO] Phoenix - Flume .................................... SKIPPED
[INFO] Phoenix - Pig ...................................... SKIPPED
[INFO] Phoenix Query Server Client ........................ SKIPPED
[INFO] Phoenix Query Server ............................... SKIPPED
[INFO] Phoenix - Pherf .................................... SKIPPED
[INFO] Phoenix - Spark .................................... SKIPPED
[INFO] Phoenix - Hive ..................................... SKIPPED
[INFO] Phoenix Client ..................................... SKIPPED
[INFO] Phoenix Server ..................................... SKIPPED
[INFO] Phoenix Assembly ................................... SKIPPED
[INFO] Phoenix - Tracing Web Application .................. SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 03:58 h
[INFO] Finished at: 2016-11-05T14:39:42+00:00
[INFO] Final Memory: 59M/542M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-failsafe-plugin:2.19.1:verify (ParallelStatsEnabledTest) on project phoenix-core: There are test failures.
[ERROR] 
[ERROR] Please refer to <https://builds.apache.org/job/Phoenix-4.x-HBase-0.98/ws/phoenix-core/target/failsafe-reports> for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :phoenix-core
Build step 'Invoke top-level Maven targets' marked build as failure
Archiving artifacts
Compressed 1.38 GB of artifacts by 30.1% relative to #1272
Recording test results
