See <https://builds.apache.org/job/Pig-trunk/1059/changes>
Changes:

[daijy] PIG-2183: Pig not working with Hadoop 0.20.203.0

------------------------------------------
[...truncated 37949 lines...]
[junit] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[junit] at java.lang.reflect.Method.invoke(Method.java:597)
[junit] at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:508)
[junit] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:959)
[junit] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:955)
[junit] at java.security.AccessController.doPrivileged(Native Method)
[junit] at javax.security.auth.Subject.doAs(Subject.java:396)
[junit] at org.apache.hadoop.ipc.Server$Handler.run(Server.java:953)
[junit]
[junit] org.apache.hadoop.ipc.RemoteException: java.io.IOException: Could not complete write to file /tmp/TestStore-output--8635028120890681173.txt_cleanupOnFailure_succeeded2 by DFSClient_-7093338
[junit] at org.apache.hadoop.hdfs.server.namenode.NameNode.complete(NameNode.java:449)
[junit] at sun.reflect.GeneratedMethodAccessor18.invoke(Unknown Source)
[junit] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[junit] at java.lang.reflect.Method.invoke(Method.java:597)
[junit] at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:508)
[junit] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:959)
[junit] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:955)
[junit] at java.security.AccessController.doPrivileged(Native Method)
[junit] at javax.security.auth.Subject.doAs(Subject.java:396)
[junit] at org.apache.hadoop.ipc.Server$Handler.run(Server.java:953)
[junit]
[junit] at org.apache.hadoop.ipc.Client.call(Client.java:740)
[junit] at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:220)
[junit] at $Proxy0.complete(Unknown Source)
[junit] at sun.reflect.GeneratedMethodAccessor18.invoke(Unknown Source)
[junit] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[junit] at java.lang.reflect.Method.invoke(Method.java:597)
[junit] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
[junit] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
[junit] at $Proxy0.complete(Unknown Source)
[junit] at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.closeInternal(DFSClient.java:3264)
[junit] at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.close(DFSClient.java:3188)
[junit] at org.apache.hadoop.hdfs.DFSClient$LeaseChecker.close(DFSClient.java:1043)
[junit] at org.apache.hadoop.hdfs.DFSClient.close(DFSClient.java:237)
[junit] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:269)
[junit] at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:83)
[junit] at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
[junit] at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
[junit] at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:127)
[junit] at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
[junit] at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
[junit] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[junit] at java.lang.reflect.Method.invoke(Method.java:597)
[junit] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
[junit] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
[junit] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
[junit] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
[junit] at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
[junit] at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
[junit] at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
[junit] at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
[junit] at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
[junit] 11/08/10 22:32:00 WARN hdfs.StateChange: DIR* NameSystem.completeFile: failed to complete /tmp/TestStore-output-936224691883146672.txt_cleanupOnFailure_succeeded1 because dir.getFileBlocks() is null and pendingFile is null
[junit] 11/08/10 22:32:00 INFO ipc.Server: IPC Server handler 8 on 39839, call complete(/tmp/TestStore-output-936224691883146672.txt_cleanupOnFailure_succeeded1, DFSClient_-7093338) from 127.0.0.1:47344: error: java.io.IOException: Could not complete write to file /tmp/TestStore-output-936224691883146672.txt_cleanupOnFailure_succeeded1 by DFSClient_-7093338
[junit] java.io.IOException: Could not complete write to file /tmp/TestStore-output-936224691883146672.txt_cleanupOnFailure_succeeded1 by DFSClient_-7093338
[junit] at org.apache.hadoop.hdfs.server.namenode.NameNode.complete(NameNode.java:449)
[junit] at sun.reflect.GeneratedMethodAccessor18.invoke(Unknown Source)
[junit] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[junit] at java.lang.reflect.Method.invoke(Method.java:597)
[junit] at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:508)
[junit] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:959)
[junit] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:955)
[junit] at java.security.AccessController.doPrivileged(Native Method)
[junit] at javax.security.auth.Subject.doAs(Subject.java:396)
[junit] at org.apache.hadoop.ipc.Server$Handler.run(Server.java:953)
[junit] 11/08/10 22:32:00 ERROR hdfs.DFSClient: Exception closing file /tmp/TestStore-output-936224691883146672.txt_cleanupOnFailure_succeeded1 : org.apache.hadoop.ipc.RemoteException: java.io.IOException: Could not complete write to file /tmp/TestStore-output-936224691883146672.txt_cleanupOnFailure_succeeded1 by DFSClient_-7093338
[junit] at org.apache.hadoop.hdfs.server.namenode.NameNode.complete(NameNode.java:449)
[junit] at sun.reflect.GeneratedMethodAccessor18.invoke(Unknown Source)
[junit] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[junit] at java.lang.reflect.Method.invoke(Method.java:597)
[junit] at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:508)
[junit] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:959)
[junit] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:955)
[junit] at java.security.AccessController.doPrivileged(Native Method)
[junit] at javax.security.auth.Subject.doAs(Subject.java:396)
[junit] at org.apache.hadoop.ipc.Server$Handler.run(Server.java:953)
[junit]
[junit] org.apache.hadoop.ipc.RemoteException: java.io.IOException: Could not complete write to file /tmp/TestStore-output-936224691883146672.txt_cleanupOnFailure_succeeded1 by DFSClient_-7093338
[junit] at org.apache.hadoop.hdfs.server.namenode.NameNode.complete(NameNode.java:449)
[junit] at sun.reflect.GeneratedMethodAccessor18.invoke(Unknown Source)
[junit] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[junit] Shutting down the Mini HDFS Cluster
[junit] at java.lang.reflect.Method.invoke(Method.java:597)
[junit] at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:508)
[junit] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:959)
[junit] at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:955)
[junit] at java.security.AccessController.doPrivileged(Native Method)
[junit] at javax.security.auth.Subject.doAs(Subject.java:396)
[junit] at org.apache.hadoop.ipc.Server$Handler.run(Server.java:953)
[junit]
[junit] at org.apache.hadoop.ipc.Client.call(Client.java:740)
[junit] at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:220)
[junit] at $Proxy0.complete(Unknown Source)
[junit] at sun.reflect.GeneratedMethodAccessor18.invoke(Unknown Source)
[junit] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[junit] at java.lang.reflect.Method.invoke(Method.java:597)
[junit] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
[junit] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
[junit] at $Proxy0.complete(Unknown Source)
[junit] at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.closeInternal(DFSClient.java:3264)
[junit] at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.close(DFSClient.java:3188)
[junit] at org.apache.hadoop.hdfs.DFSClient$LeaseChecker.close(DFSClient.java:1043)
[junit] at org.apache.hadoop.hdfs.DFSClient.close(DFSClient.java:237)
[junit] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:269)
[junit] at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsClusters(MiniGenericCluster.java:83)
[junit] at org.apache.pig.test.MiniGenericCluster.shutdownMiniDfsAndMrClusters(MiniGenericCluster.java:77)
[junit] at org.apache.pig.test.MiniGenericCluster.shutDown(MiniGenericCluster.java:68)
[junit] at org.apache.pig.test.TestStore.oneTimeTearDown(TestStore.java:127)
[junit] at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
[junit] at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
[junit] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
[junit] at java.lang.reflect.Method.invoke(Method.java:597)
[junit] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
[junit] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
[junit] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
[junit] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:37)
[junit] at org.junit.runners.ParentRunner.run(ParentRunner.java:220)
[junit] at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
[junit] at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
[junit] at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
[junit] at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
[junit] Shutting down DataNode 3
[junit] 11/08/10 22:32:00 INFO ipc.Server: Stopping server on 58098
[junit] 11/08/10 22:32:00 INFO ipc.Server: IPC Server handler 1 on 58098: exiting
[junit] 11/08/10 22:32:00 INFO ipc.Server: IPC Server handler 0 on 58098: exiting
[junit] 11/08/10 22:32:00 INFO ipc.Server: Stopping IPC Server listener on 58098
[junit] 11/08/10 22:32:00 INFO ipc.Server: IPC Server handler 2 on 58098: exiting
[junit] 11/08/10 22:32:00 INFO ipc.Server: Stopping IPC Server Responder
[junit] 11/08/10 22:32:00 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
[junit] 11/08/10 22:32:00 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:49809, storageID=DS-399153341-67.195.138.24-49809-1313015166543, infoPort=56944, ipcPort=58098):DataXceiveServer: java.nio.channels.AsynchronousCloseException
[junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
[junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
[junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
[junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:130)
[junit] at java.lang.Thread.run(Thread.java:662)
[junit]
[junit] 11/08/10 22:32:01 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
[junit] 11/08/10 22:32:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
[junit] 11/08/10 22:32:01 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:49809, storageID=DS-399153341-67.195.138.24-49809-1313015166543, infoPort=56944, ipcPort=58098):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data7/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data8/current'}>
[junit] 11/08/10 22:32:01 INFO ipc.Server: Stopping server on 58098
[junit] 11/08/10 22:32:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
[junit] Shutting down DataNode 2
[junit] 11/08/10 22:32:01 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:43514 to delete blk_1109052136061235418_1123 blk_2384903471780373549_1121 blk_4931082421985950165_1127 blk_3120036595885400354_1122 blk_7863595798514313049_1124 blk_837672737608699342_1126
[junit] 11/08/10 22:32:01 INFO hdfs.StateChange: BLOCK* ask 127.0.0.1:49809 to delete blk_1109052136061235418_1123 blk_2384903471780373549_1121 blk_4931082421985950165_1127
[junit] 11/08/10 22:32:01 INFO ipc.Server: Stopping server on 40421
[junit] 11/08/10 22:32:01 INFO ipc.Server: IPC Server handler 0 on 40421: exiting
[junit] 11/08/10 22:32:01 INFO ipc.Server: Stopping IPC Server Responder
[junit] 11/08/10 22:32:01 INFO ipc.Server: IPC Server handler 2 on 40421: exiting
[junit] 11/08/10 22:32:01 INFO ipc.Server: Stopping IPC Server listener on 40421
[junit] 11/08/10 22:32:01 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:48977, storageID=DS-948614374-67.195.138.24-48977-1313015166286, infoPort=56589, ipcPort=40421):DataXceiveServer: java.nio.channels.AsynchronousCloseException
[junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
[junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
[junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
[junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:130)
[junit] at java.lang.Thread.run(Thread.java:662)
[junit]
[junit] 11/08/10 22:32:01 INFO ipc.Server: IPC Server handler 1 on 40421: exiting
[junit] 11/08/10 22:32:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 1
[junit] 11/08/10 22:32:01 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
[junit] 11/08/10 22:32:01 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:48977, storageID=DS-948614374-67.195.138.24-48977-1313015166286, infoPort=56589, ipcPort=40421):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data5/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data6/current'}>
[junit] 11/08/10 22:32:01 INFO ipc.Server: Stopping server on 40421
[junit] 11/08/10 22:32:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
[junit] Shutting down DataNode 1
[junit] 11/08/10 22:32:01 INFO ipc.Server: Stopping server on 58796
[junit] 11/08/10 22:32:01 INFO ipc.Server: IPC Server handler 0 on 58796: exiting
[junit] 11/08/10 22:32:01 INFO ipc.Server: IPC Server handler 2 on 58796: exiting
[junit] 11/08/10 22:32:01 INFO ipc.Server: Stopping IPC Server listener on 58796
[junit] 11/08/10 22:32:01 INFO ipc.Server: IPC Server handler 1 on 58796: exiting
[junit] 11/08/10 22:32:01 INFO ipc.Server: Stopping IPC Server Responder
[junit] 11/08/10 22:32:01 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:57899, storageID=DS-847241149-67.195.138.24-57899-1313015166034, infoPort=34622, ipcPort=58796):DataXceiveServer: java.nio.channels.AsynchronousCloseException
[junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
[junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
[junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
[junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:130)
[junit] at java.lang.Thread.run(Thread.java:662)
[junit]
[junit] 11/08/10 22:32:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
[junit] 11/08/10 22:32:01 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
[junit] 11/08/10 22:32:01 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:57899, storageID=DS-847241149-67.195.138.24-57899-1313015166034, infoPort=34622, ipcPort=58796):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data3/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data4/current'}>
[junit] 11/08/10 22:32:01 INFO ipc.Server: Stopping server on 58796
[junit] 11/08/10 22:32:01 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
[junit] Shutting down DataNode 0
[junit] 11/08/10 22:32:02 INFO ipc.Server: Stopping server on 47678
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 0 on 47678: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 2 on 47678: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 1 on 47678: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: Stopping IPC Server Responder
[junit] 11/08/10 22:32:02 INFO ipc.Server: Stopping IPC Server listener on 47678
[junit] 11/08/10 22:32:02 WARN datanode.DataNode: DatanodeRegistration(127.0.0.1:43514, storageID=DS-1415888336-67.195.138.24-43514-1313015165759, infoPort=44360, ipcPort=47678):DataXceiveServer: java.nio.channels.AsynchronousCloseException
[junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
[junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
[junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
[junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:130)
[junit] at java.lang.Thread.run(Thread.java:662)
[junit]
[junit] 11/08/10 22:32:02 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
[junit] 11/08/10 22:32:02 INFO datanode.DataBlockScanner: Exiting DataBlockScanner thread.
[junit] 11/08/10 22:32:02 INFO datanode.DataNode: DatanodeRegistration(127.0.0.1:43514, storageID=DS-1415888336-67.195.138.24-43514-1313015165759, infoPort=44360, ipcPort=47678):Finishing DataNode in: FSDataset{dirpath='<https://builds.apache.org/job/Pig-trunk/ws/trunk/build/test/data/dfs/data/data1/current,/home/jenkins/jenkins-slave/workspace/Pig-trunk/trunk/build/test/data/dfs/data/data2/current'}>
[junit] 11/08/10 22:32:02 INFO ipc.Server: Stopping server on 47678
[junit] 11/08/10 22:32:02 INFO datanode.DataNode: Waiting for threadgroup to exit, active threads is 0
[junit] 11/08/10 22:32:02 WARN namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
[junit] 11/08/10 22:32:02 INFO namenode.DecommissionManager: Interrupted Monitor
[junit] java.lang.InterruptedException: sleep interrupted
[junit] at java.lang.Thread.sleep(Native Method)
[junit] at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
[junit] at java.lang.Thread.run(Thread.java:662)
[junit] 11/08/10 22:32:02 INFO namenode.FSNamesystem: Number of transactions: 694 Total time for transactions(ms): 17Number of transactions batched in Syncs: 115 Number of syncs: 483 SyncTimes(ms): 6836 289
[junit] 11/08/10 22:32:02 INFO ipc.Server: Stopping server on 39839
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 0 on 39839: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 1 on 39839: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 3 on 39839: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 2 on 39839: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 4 on 39839: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 5 on 39839: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 7 on 39839: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 6 on 39839: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 9 on 39839: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: IPC Server handler 8 on 39839: exiting
[junit] 11/08/10 22:32:02 INFO ipc.Server: Stopping IPC Server listener on 39839
[junit] 11/08/10 22:32:02 INFO ipc.Server: Stopping IPC Server Responder
[junit] Tests run: 17, Failures: 0, Errors: 0, Time elapsed: 353.598 sec
[junit] Running org.apache.pig.test.TestStringUDFs
[junit] 11/08/10 22:32:03 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.NullPointerException
[junit] 11/08/10 22:32:03 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
[junit] 11/08/10 22:32:03 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -1
[junit] 11/08/10 22:32:03 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -8
[junit] 11/08/10 22:32:03 WARN builtin.SUBSTRING: No logger object provided to UDF: org.apache.pig.builtin.SUBSTRING. java.lang.StringIndexOutOfBoundsException: String index out of range: -2
[junit] 11/08/10 22:32:03 WARN builtin.INDEXOF: No logger object provided to UDF: org.apache.pig.builtin.INDEXOF. Failed to process input; error - null
[junit] 11/08/10 22:32:03 WARN builtin.LAST_INDEX_OF: No logger object provided to UDF: org.apache.pig.builtin.LAST_INDEX_OF. Failed to process input; error - null
[junit] Tests run: 11, Failures: 0, Errors: 0, Time elapsed: 0.084 sec
[delete] Deleting directory /tmp/pig_junit_tmp1344005051

clover.check:

BUILD FAILED
<https://builds.apache.org/job/Pig-trunk/ws/trunk/build.xml>:1288:
##################################################################
Clover not found.
Please specify -Dclover.home=<base of clover installation>
on the command line.
##################################################################

Total time: 21 minutes 23 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Recording test results
Publishing Javadoc
Archiving artifacts
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
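Note that the failure above comes from the clover.check target, not from the tests themselves: both TestStore and TestStringUDFs finish with zero failures and errors, and the build aborts only because no Clover installation was supplied. For anyone reproducing this locally, a minimal sketch of passing the property the error message asks for might look like the following; the "test" target name and the /path/to/clover location are assumptions, only the -Dclover.home property itself comes from the error message:

    # hypothetical local invocation; adjust targets and the Clover path to your checkout
    ant test -Dclover.home=/path/to/clover

If Clover coverage is not needed, running the plain test targets without the Clover-related steps avoids clover.check entirely.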