See <https://builds.apache.org/job/Phoenix-4.x-HBase-1.3/448/display/redirect?page=changes>

Changes:

[elserj] PHOENIX-5370 More missing license headers (addendum)

------------------------------------------
[...truncated 565.55 KB...]
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:17053)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8350)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2170)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2152)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:35076)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2376)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:123)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$Handler.run(RpcExecutor.java:188)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$Handler.run(RpcExecutor.java:168)
Caused by: java.lang.RuntimeException: java.lang.RuntimeException: java.lang.OutOfMemoryError: unable to create new native thread
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:220)
        at org.apache.hadoop.hbase.client.ClientScanner.call(ClientScanner.java:314)
        at org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:289)
        at org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:164)
        at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:159)
        at org.apache.hadoop.hbase.client.HTable.getScanner(HTable.java:796)
        at org.apache.hadoop.hbase.client.HTableWrapper.getScanner(HTableWrapper.java:215)
        at org.apache.phoenix.coprocessor.ViewFinder.findRelatedViews(ViewFinder.java:93)
        at org.apache.phoenix.coprocessor.ViewFinder.findAllRelatives(ViewFinder.java:65)
        at org.apache.phoenix.coprocessor.ViewFinder.findAllRelatives(ViewFinder.java:59)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.findAncestorViews(MetaDataEndpointImpl.java:2560)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.addDerivedColumnsFromAncestors(MetaDataEndpointImpl.java:741)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.combineColumns(MetaDataEndpointImpl.java:680)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.getTableFromCache(MetaDataEndpointImpl.java:1929)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.doGetTable(MetaDataEndpointImpl.java:3710)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.createTable(MetaDataEndpointImpl.java:2178)
        ... 9 more
Caused by: java.lang.RuntimeException: java.lang.OutOfMemoryError: unable to create new native thread
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:220)
        at org.apache.hadoop.hbase.client.ClientSmallReversedScanner.loadCache(ClientSmallReversedScanner.java:228)
        at org.apache.hadoop.hbase.client.ClientSmallReversedScanner.next(ClientSmallReversedScanner.java:202)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegionInMeta(ConnectionManager.java:1298)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1197)
        at org.apache.hadoop.hbase.client.CoprocessorHConnection.locateRegion(CoprocessorHConnection.java:41)
        at org.apache.hadoop.hbase.client.RpcRetryingCallerWithReadReplicas.getRegionLocations(RpcRetryingCallerWithReadReplicas.java:303)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:156)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:60)
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:212)
        ... 24 more
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
        at java.lang.Thread.start0(Native Method)
        at java.lang.Thread.start(Thread.java:717)
        at java.util.concurrent.ThreadPoolExecutor.addWorker(ThreadPoolExecutor.java:957)
        at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1367)
        at org.apache.hadoop.hbase.client.ResultBoundedCompletionService.submit(ResultBoundedCompletionService.java:146)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.addCallsForCurrentReplica(ScannerCallableWithReplicas.java:287)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:170)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:60)
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:212)
        ... 33 more

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.DoNotRetryIOException: SCHEMA2.N000007: java.lang.RuntimeException: java.lang.OutOfMemoryError: unable to create new native thread
        at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:120)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.createTable(MetaDataEndpointImpl.java:2463)
        at org.apache.phoenix.coprocessor.generated.MetaDataProtos$MetaDataService.callMethod(MetaDataProtos.java:17053)
        at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8350)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2170)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2152)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:35076)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2376)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:123)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$Handler.run(RpcExecutor.java:188)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$Handler.run(RpcExecutor.java:168)
Caused by: java.lang.RuntimeException: java.lang.RuntimeException: java.lang.OutOfMemoryError: unable to create new native thread
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:220)
        at org.apache.hadoop.hbase.client.ClientScanner.call(ClientScanner.java:314)
        at org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:289)
        at org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:164)
        at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:159)
        at org.apache.hadoop.hbase.client.HTable.getScanner(HTable.java:796)
        at org.apache.hadoop.hbase.client.HTableWrapper.getScanner(HTableWrapper.java:215)
        at org.apache.phoenix.coprocessor.ViewFinder.findRelatedViews(ViewFinder.java:93)
        at org.apache.phoenix.coprocessor.ViewFinder.findAllRelatives(ViewFinder.java:65)
        at org.apache.phoenix.coprocessor.ViewFinder.findAllRelatives(ViewFinder.java:59)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.findAncestorViews(MetaDataEndpointImpl.java:2560)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.addDerivedColumnsFromAncestors(MetaDataEndpointImpl.java:741)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.combineColumns(MetaDataEndpointImpl.java:680)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.getTableFromCache(MetaDataEndpointImpl.java:1929)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.doGetTable(MetaDataEndpointImpl.java:3710)
        at org.apache.phoenix.coprocessor.MetaDataEndpointImpl.createTable(MetaDataEndpointImpl.java:2178)
        ... 9 more
Caused by: java.lang.RuntimeException: java.lang.OutOfMemoryError: unable to create new native thread
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:220)
        at org.apache.hadoop.hbase.client.ClientSmallReversedScanner.loadCache(ClientSmallReversedScanner.java:228)
        at org.apache.hadoop.hbase.client.ClientSmallReversedScanner.next(ClientSmallReversedScanner.java:202)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegionInMeta(ConnectionManager.java:1298)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1197)
        at org.apache.hadoop.hbase.client.CoprocessorHConnection.locateRegion(CoprocessorHConnection.java:41)
        at org.apache.hadoop.hbase.client.RpcRetryingCallerWithReadReplicas.getRegionLocations(RpcRetryingCallerWithReadReplicas.java:303)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:156)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:60)
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:212)
        ... 24 more
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
        at java.lang.Thread.start0(Native Method)
        at java.lang.Thread.start(Thread.java:717)
        at java.util.concurrent.ThreadPoolExecutor.addWorker(ThreadPoolExecutor.java:957)
        at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1367)
        at org.apache.hadoop.hbase.client.ResultBoundedCompletionService.submit(ResultBoundedCompletionService.java:146)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.addCallsForCurrentReplica(ScannerCallableWithReplicas.java:287)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:170)
        at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:60)
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:212)
        ... 33 more
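
Note that every chain above bottoms out in the same java.lang.OutOfMemoryError: unable to create new native thread, thrown from Thread.start() inside ThreadPoolExecutor.addWorker. Despite the name, this error usually means the operating system refused to create another native thread (e.g. the build node's `ulimit -u` task cap or native memory for thread stacks was exhausted), not that the JVM heap was full; raising the limit or shrinking thread pools / `-Xss` is the usual remedy. A minimal, hypothetical Java sketch of the failure mode (not taken from this build log), for anyone trying to reproduce it locally:

public class NativeThreadExhaustion {
    // Spawns parked daemon threads until the OS refuses to create another
    // one, reproducing "java.lang.OutOfMemoryError: unable to create new
    // native thread". Run under a low limit (e.g. `ulimit -u 512`) so it
    // fails quickly instead of starving the machine.
    public static void main(String[] args) {
        long started = 0;
        try {
            while (true) {
                Thread t = new Thread(() -> {
                    try {
                        Thread.sleep(Long.MAX_VALUE); // hold the native thread alive
                    } catch (InterruptedException ignored) {
                    }
                });
                t.setDaemon(true);
                t.start(); // eventually throws OutOfMemoryError here
                started++;
            }
        } catch (OutOfMemoryError e) {
            System.err.println("Thread.start() failed after " + started
                    + " threads: " + e.getMessage());
        }
    }
}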


[ERROR] testUpdatableSaltedView(org.apache.phoenix.end2end.TenantSpecificViewIndexSaltedIT)  Time elapsed: 2.449 s  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: java.util.concurrent.ExecutionException: java.io.IOException: DataStreamer Exception: 
        at org.apache.phoenix.end2end.TenantSpecificViewIndexSaltedIT.testUpdatableSaltedView(TenantSpecificViewIndexSaltedIT.java:28)
Caused by: java.io.IOException: java.util.concurrent.ExecutionException: java.io.IOException: DataStreamer Exception: 
        at org.apache.phoenix.end2end.TenantSpecificViewIndexSaltedIT.testUpdatableSaltedView(TenantSpecificViewIndexSaltedIT.java:28)
Caused by: org.apache.hadoop.ipc.RemoteException: java.util.concurrent.ExecutionException: java.io.IOException: DataStreamer Exception: 

[ERROR] Tests run: 9, Failures: 0, Errors: 1, Skipped: 0, Time elapsed: 143.956 s <<< FAILURE! - in org.apache.phoenix.end2end.AlterMultiTenantTableWithViewsIT
[ERROR] testChangingPKOfBaseTableChangesPKForAllViews(org.apache.phoenix.end2end.AlterMultiTenantTableWithViewsIT)  Time elapsed: 2.326 s  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: java.util.concurrent.ExecutionException: java.io.IOException: DataStreamer Exception: 
        at org.apache.phoenix.end2end.AlterMultiTenantTableWithViewsIT.testChangingPKOfBaseTableChangesPKForAllViews(AlterMultiTenantTableWithViewsIT.java:193)
Caused by: java.io.IOException: java.util.concurrent.ExecutionException: java.io.IOException: DataStreamer Exception: 
        at org.apache.phoenix.end2end.AlterMultiTenantTableWithViewsIT.testChangingPKOfBaseTableChangesPKForAllViews(AlterMultiTenantTableWithViewsIT.java:193)
Caused by: org.apache.hadoop.ipc.RemoteException: java.util.concurrent.ExecutionException: java.io.IOException: DataStreamer Exception: 

[INFO] Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 176.205 s - in org.apache.phoenix.end2end.TenantSpecificViewIndexIT
[WARNING] Tests run: 16, Failures: 0, Errors: 0, Skipped: 2, Time elapsed: 216.041 s - in org.apache.phoenix.end2end.index.ViewIndexIT
[ERROR] Tests run: 56, Failures: 0, Errors: 1, Skipped: 0, Time elapsed: 565.034 s <<< FAILURE! - in org.apache.phoenix.end2end.AlterTableWithViewsIT
[ERROR] testAddNewColumnsToBaseTableWithViews[AlterTableWithViewsIT_columnEncoded=false, multiTenant=false, salted=false](org.apache.phoenix.end2end.AlterTableWithViewsIT)  Time elapsed: 4.39 s  <<< ERROR!
java.lang.RuntimeException: java.lang.OutOfMemoryError: unable to create new native thread
        at org.apache.phoenix.end2end.AlterTableWithViewsIT.testAddNewColumnsToBaseTableWithViews(AlterTableWithViewsIT.java:146)
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
        at org.apache.phoenix.end2end.AlterTableWithViewsIT.testAddNewColumnsToBaseTableWithViews(AlterTableWithViewsIT.java:146)

[INFO] 
[INFO] Results:
[INFO] 
[ERROR] Errors: 
[ERROR]   AlterMultiTenantTableWithViewsIT.testChangingPKOfBaseTableChangesPKForAllViews:193 » PhoenixIO
[ERROR]   AlterTableWithViewsIT.testAddNewColumnsToBaseTableWithViews:146 » Runtime java...
[ERROR]   DropTableWithViewsIT.testDropTableWithChildViews:127 » PhoenixIO org.apache.ha...
[ERROR]   TenantSpecificViewIndexSaltedIT.testUpdatableSaltedView:28->BaseTenantSpecificViewIndexIT.testUpdatableView:46->BaseTenantSpecificViewIndexIT.testUpdatableView:52->BaseTenantSpecificViewIndexIT.createBaseTable:116 » PhoenixIO
[ERROR]   TenantSpecificViewIndexSaltedIT.testUpdatableViewsWithSameNameDifferentTenants:33->BaseTenantSpecificViewIndexIT.testUpdatableViewsWithSameNameDifferentTenants:78->BaseTenantSpecificViewIndexIT.testUpdatableViewsWithSameNameDifferentTenants:96->BaseTenantSpecificViewIndexIT.createAndVerifyIndex:136 » PhoenixIO
[ERROR]   ViewIT.doSetup:136->BaseTest.setUpTestDriver:521->BaseTest.checkClusterInitialized:435->BaseTest.setUpTestCluster:449->BaseTest.initMiniCluster:550 » Runtime
[ERROR]   ViewMetadataIT.doSetup:77->BaseTest.setUpTestDriver:521->BaseTest.checkClusterInitialized:435->BaseTest.setUpTestCluster:449->BaseTest.initMiniCluster:550 » Runtime
[INFO] 
[ERROR] Tests run: 101, Failures: 0, Errors: 7, Skipped: 2
[INFO] 
[INFO] 
[INFO] --- maven-failsafe-plugin:2.20:verify (ParallelStatsEnabledTest) @ phoenix-core ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary for Apache Phoenix 4.15.0-HBase-1.3-SNAPSHOT:
[INFO] 
[INFO] Apache Phoenix ..................................... SUCCESS [  1.983 s]
[INFO] Phoenix Core ....................................... FAILURE [  03:06 h]
[INFO] Phoenix - Pherf .................................... SKIPPED
[INFO] Phoenix Client ..................................... SKIPPED
[INFO] Phoenix Server ..................................... SKIPPED
[INFO] Phoenix Assembly ................................... SKIPPED
[INFO] Phoenix - Tracing Web Application .................. SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time:  03:06 h
[INFO] Finished at: 2019-06-26T01:22:17Z
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-failsafe-plugin:2.20:verify (ParallelStatsEnabledTest) on project phoenix-core: There are test failures.
[ERROR] 
[ERROR] Please refer to <https://builds.apache.org/job/Phoenix-4.x-HBase-1.3/ws/phoenix-core/target/failsafe-reports> for the individual test results.
[ERROR] Please refer to dump files (if any exist) [date]-jvmRun[N].dump, [date].dumpstream and [date]-jvmRun[N].dumpstream.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :phoenix-core
Build step 'Invoke top-level Maven targets' marked build as failure
Archiving artifacts
ERROR: Step 'Archive the artifacts' aborted due to exception: hudson.remoting.ProxyException: java.lang.Throwable
        at hudson.remoting.FastPipedOutputStream.<init>(FastPipedOutputStream.java:49)
        at hudson.remoting.Pipe.readObject(Pipe.java:191)
        at sun.reflect.GeneratedMethodAccessor189.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at java.io.ObjectStreamClass.invokeReadObject(ObjectStreamClass.java:1058)
        at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2136)
        at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027)
        at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535)
        at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2245)
        at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2169)
        at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027)
        at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535)
        at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2245)
        at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2169)
        at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027)
        at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535)
        at java.io.ObjectInputStream.readObject(ObjectInputStream.java:422)
        at hudson.remoting.UserRequest.deserialize(UserRequest.java:291)
        at hudson.remoting.UserRequest.perform(UserRequest.java:190)
        at hudson.remoting.UserRequest.perform(UserRequest.java:54)
        at hudson.remoting.Request$2.run(Request.java:369)
Caused: hudson.remoting.ProxyException: java.io.IOException: Reader side has already been abandoned
        at hudson.remoting.FastPipedOutputStream.sink(FastPipedOutputStream.java:81)
        at hudson.remoting.FastPipedOutputStream.write(FastPipedOutputStream.java:151)
        at hudson.remoting.FastPipedOutputStream.write(FastPipedOutputStream.java:138)
        at hudson.remoting.ProxyOutputStream$Chunk$1.run(ProxyOutputStream.java:255)
        at hudson.remoting.PipeWriter$1.run(PipeWriter.java:158)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at hudson.remoting.SingleLaneExecutorService$1.run(SingleLaneExecutorService.java:131)
        at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:72)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:748)
Caused: java.io.IOException: Pipe is already closed
        at hudson.remoting.PipeWindow.checkDeath(PipeWindow.java:122)
        at hudson.remoting.PipeWindow$Real.get(PipeWindow.java:220)
        at hudson.remoting.ProxyOutputStream.write(ProxyOutputStream.java:125)
        at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:82)
        at java.io.BufferedOutputStream.write(BufferedOutputStream.java:126)
        at java.util.zip.GZIPOutputStream.finish(GZIPOutputStream.java:168)
        at java.util.zip.DeflaterOutputStream.close(DeflaterOutputStream.java:238)
        at jsync.protocol.BaseWriter.close(BaseWriter.java:14)
        at jsync.protocol.BlockIterableWriter.close(BlockIterableWriter.java:26)
        at com.cloudbees.jenkins.plugins.jsync.archiver.JSyncArtifactManager.remoteSync(JSyncArtifactManager.java:151)
        at com.cloudbees.jenkins.plugins.jsync.archiver.JSyncArtifactManager.archive(JSyncArtifactManager.java:76)
        at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:235)
        at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
        at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
        at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
        at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
        at hudson.model.Build$BuildExecution.post2(Build.java:186)
        at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
        at hudson.model.Run.execute(Run.java:1835)
        at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
        at hudson.model.ResourceController.execute(ResourceController.java:97)
        at hudson.model.Executor.run(Executor.java:429)
[JIRA] Updating issue PHOENIX-5370
Recording test results
Not sending mail to unregistered user jinqian...@alibaba-inc.com
Not sending mail to unregistered user monani.mi...@gmail.com
Not sending mail to unregistered user mmon...@salesforce.com
Not sending mail to unregistered user ankitsingha...@gmail.com
Not sending mail to unregistered user k.me...@salesforce.com
Not sending mail to unregistered user Rajeshbabu Chintaguntla
Not sending mail to unregistered user s.ka...@salesforce.com
