See <https://builds.apache.org/job/Phoenix-4.x-HBase-0.98/1365/changes>

Changes:

[jamestaylor] PHOENIX-3199 ServerCacheClient sends cache to all regions unnecessarily

------------------------------------------
[...truncated 1134 lines...]
java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE6
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)


testImportOneLocalIndexTable(org.apache.phoenix.end2end.CsvBulkLoadToolIT)  Time elapsed: 2,407.29 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: callTimeout=1200000, callDuration=1202405: row '  TABLE5_IDX' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478280011578.07bc3beb57d1c0bcc417777ce2b994ab., hostname=asf910.gq1.ygridcore.net,55185,1478280000634, seqNum=1
        at org.apache.phoenix.end2end.CsvBulkLoadToolIT.testImportOneIndexTable(CsvBulkLoadToolIT.java:309)
        at org.apache.phoenix.end2end.CsvBulkLoadToolIT.testImportOneLocalIndexTable(CsvBulkLoadToolIT.java:297)
Caused by: java.net.SocketTimeoutException: callTimeout=1200000, callDuration=1202405: row '  TABLE5_IDX' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478280011578.07bc3beb57d1c0bcc417777ce2b994ab., hostname=asf910.gq1.ygridcore.net,55185,1478280000634, seqNum=1
Caused by: java.net.SocketTimeoutException: Call to asf910.gq1.ygridcore.net/67.195.81.154:55185 failed because java.net.SocketTimeoutException: 1200000 millis timeout while waiting for channel to be ready for read. ch : java.nio.channels.SocketChannel[connected local=/67.195.81.154:45997 remote=asf910.gq1.ygridcore.net/67.195.81.154:55185]
Caused by: java.net.SocketTimeoutException: 1200000 millis timeout while waiting for channel to be ready for read. ch : java.nio.channels.SocketChannel[connected local=/67.195.81.154:45997 remote=asf910.gq1.ygridcore.net/67.195.81.154:55185]

testImportOneLocalIndexTable(org.apache.phoenix.end2end.CsvBulkLoadToolIT)  Time elapsed: 2,407.29 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: callTimeout=1200000, callDuration=1222418: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478280011578.07bc3beb57d1c0bcc417777ce2b994ab., hostname=asf910.gq1.ygridcore.net,55185,1478280000634, seqNum=1
Caused by: java.net.SocketTimeoutException: callTimeout=1200000, callDuration=1222418: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478280011578.07bc3beb57d1c0bcc417777ce2b994ab., hostname=asf910.gq1.ygridcore.net,55185,1478280000634, seqNum=1
Caused by: java.io.IOException: java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)


testInvalidArguments(org.apache.phoenix.end2end.CsvBulkLoadToolIT)  Time elapsed: 1,200.315 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: callTimeout=1200000, callDuration=1222146: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478280011578.07bc3beb57d1c0bcc417777ce2b994ab., hostname=asf910.gq1.ygridcore.net,55185,1478280000634, seqNum=1
Caused by: java.net.SocketTimeoutException: callTimeout=1200000, callDuration=1222146: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478280011578.07bc3beb57d1c0bcc417777ce2b994ab., hostname=asf910.gq1.ygridcore.net,55185,1478280000634, seqNum=1
Caused by: java.io.IOException: java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)


testImportWithIndex(org.apache.phoenix.end2end.CsvBulkLoadToolIT)  Time elapsed: 1,207.479 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: callTimeout=1200000, callDuration=1222164: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478280011578.07bc3beb57d1c0bcc417777ce2b994ab., hostname=asf910.gq1.ygridcore.net,55185,1478280000634, seqNum=1
Caused by: java.net.SocketTimeoutException: callTimeout=1200000, callDuration=1222164: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478280011578.07bc3beb57d1c0bcc417777ce2b994ab., hostname=asf910.gq1.ygridcore.net,55185,1478280000634, seqNum=1
Caused by: java.io.IOException: java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)


testImportOneIndexTable(org.apache.phoenix.end2end.CsvBulkLoadToolIT)  Time elapsed: 1,207.494 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: callTimeout=1200000, callDuration=1222410: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478280011578.07bc3beb57d1c0bcc417777ce2b994ab., hostname=asf910.gq1.ygridcore.net,55185,1478280000634, seqNum=1
Caused by: java.net.SocketTimeoutException: callTimeout=1200000, callDuration=1222410: row '  TABLE5' on table 'SYSTEM.CATALOG' at region=SYSTEM.CATALOG,,1478280011578.07bc3beb57d1c0bcc417777ce2b994ab., hostname=asf910.gq1.ygridcore.net,55185,1478280000634, seqNum=1
Caused by: java.io.IOException: java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: java.io.IOException: Timed out waiting for lock for row: \x00\x00TABLE5
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLockInternal(HRegion.java:3804)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3766)
        at org.apache.hadoop.hbase.regionserver.HRegion.getRowLock(HRegion.java:3830)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.acquireLock(MetaDataEndpointImpl.java:1568)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.dropTable(MetaDataEndpointImpl.java:1710)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:16297)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:6041)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execServiceOnRegion(HRegionServer.java:3520)
        at org.apache.hadoop.hbase.regionserver.HRegionServer.execService(HRegionServer.java:3502)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:31194)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2149)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)



Results :

Tests in error: 
  CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO
  CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO
  CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO
org.apache.phoenix.end2end.CsvBulkLoadToolIT.testImportOneLocalIndexTable(org.apache.phoenix.end2end.CsvBulkLoadToolIT)
  Run 1: CsvBulkLoadToolIT.testImportOneLocalIndexTable:297->testImportOneIndexTable:309 » PhoenixIO
  Run 2: CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO

  CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO
org.apache.phoenix.end2end.CsvBulkLoadToolIT.testImportWithLocalIndex(org.apache.phoenix.end2end.CsvBulkLoadToolIT)
  Run 1: CsvBulkLoadToolIT.testImportWithLocalIndex:258 » PhoenixIO callTimeout=1200000...
  Run 2: CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO

  CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO
  CsvBulkLoadToolIT>BaseOwnClusterIT.cleanUpAfterTest:35->BaseTest.deletePriorMetaData:857->BaseTest.deletePriorTables:865->BaseTest.deletePriorTables:876->BaseTest.deletePriorTables:921 » PhoenixIO

Tests run: 131, Failures: 0, Errors: 8, Skipped: 1

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:verify (ParallelStatsEnabledTest) @ phoenix-core ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Phoenix ..................................... SUCCESS [  4.340 s]
[INFO] Phoenix Core ....................................... FAILURE [  04:03 h]
[INFO] Phoenix - Flume .................................... SKIPPED
[INFO] Phoenix - Pig ...................................... SKIPPED
[INFO] Phoenix Query Server Client ........................ SKIPPED
[INFO] Phoenix Query Server ............................... SKIPPED
[INFO] Phoenix - Pherf .................................... SKIPPED
[INFO] Phoenix - Spark .................................... SKIPPED
[INFO] Phoenix - Hive ..................................... SKIPPED
[INFO] Phoenix Client ..................................... SKIPPED
[INFO] Phoenix Server ..................................... SKIPPED
[INFO] Phoenix Assembly ................................... SKIPPED
[INFO] Phoenix - Tracing Web Application .................. SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 04:03 h
[INFO] Finished at: 2016-11-04T20:41:44+00:00
[INFO] Final Memory: 60M/819M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-failsafe-plugin:2.19.1:verify (ParallelStatsEnabledTest) on project phoenix-core: There are test failures.
[ERROR] 
[ERROR] Please refer to <https://builds.apache.org/job/Phoenix-4.x-HBase-0.98/ws/phoenix-core/target/failsafe-reports> for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :phoenix-core
Build step 'Invoke top-level Maven targets' marked build as failure
Archiving artifacts
Compressed 1.04 GB of artifacts by 47.2% relative to #1272
Recording test results
