See <https://builds.apache.org/job/Tajo-master-nightly/828/changes>
Changes:
[jhkim] TAJO-1729: No handling of default case in DDLExecutor. (Contributed by
Dongkyu Hwangbo, committed by Jinho)
------------------------------------------
[...truncated 744457 lines...]
at
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at
org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:969)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2049)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2045)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2045)
at org.apache.hadoop.ipc.Client.call(Client.java:1476)
at org.apache.hadoop.ipc.Client.call(Client.java:1407)
at
org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
at com.sun.proxy.$Proxy42.fsync(Unknown Source)
at
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.fsync(ClientNamenodeProtocolTranslatorPB.java:838)
at sun.reflect.GeneratedMethodAccessor410.invoke(Unknown Source)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at
org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187)
at
org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy43.fsync(Unknown Source)
at sun.reflect.GeneratedMethodAccessor410.invoke(Unknown Source)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:279)
at com.sun.proxy.$Proxy83.fsync(Unknown Source)
at sun.reflect.GeneratedMethodAccessor410.invoke(Unknown Source)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:279)
at com.sun.proxy.$Proxy83.fsync(Unknown Source)
at
org.apache.hadoop.hdfs.DFSOutputStream.flushOrSync(DFSOutputStream.java:2022)
at
org.apache.hadoop.hdfs.DFSOutputStream.hsync(DFSOutputStream.java:1898)
at
org.apache.hadoop.fs.FSDataOutputStream.hsync(FSDataOutputStream.java:139)
at
org.apache.tajo.ha.HdfsServiceTracker.createMasterFile(HdfsServiceTracker.java:244)
at
org.apache.tajo.ha.HdfsServiceTracker.register(HdfsServiceTracker.java:155)
at
org.apache.tajo.ha.HdfsServiceTracker$PingChecker.run(HdfsServiceTracker.java:374)
at java.lang.Thread.run(Thread.java:724)
Tests run: 1, Failures: 0, Errors: 1, Skipped: 0, Time elapsed: 3.734 sec <<<
FAILURE! - in org.apache.tajo.ha.TestHAServiceHDFSImpl
testAutoFailOver(org.apache.tajo.ha.TestHAServiceHDFSImpl) Time elapsed: 3.734
sec <<< ERROR!
org.apache.tajo.exception.TajoRuntimeException:
org.apache.tajo.client.v2.exception.ClientConnectionException:
java.io.EOFException
at java.io.DataInputStream.readUnsignedShort(DataInputStream.java:340)
at java.io.DataInputStream.readUTF(DataInputStream.java:589)
at java.io.DataInputStream.readUTF(DataInputStream.java:564)
at
org.apache.tajo.ha.HdfsServiceTracker.getAddressElements(HdfsServiceTracker.java:498)
at
org.apache.tajo.ha.HdfsServiceTracker.getClientServiceAddress(HdfsServiceTracker.java:409)
at
org.apache.tajo.client.SessionConnection.getTajoMasterAddr(SessionConnection.java:361)
at
org.apache.tajo.client.SessionConnection.getTajoMasterConnection(SessionConnection.java:130)
at
org.apache.tajo.client.SessionConnection.<init>(SessionConnection.java:113)
at org.apache.tajo.client.TajoClientImpl.<init>(TajoClientImpl.java:62)
at org.apache.tajo.client.TajoClientImpl.<init>(TajoClientImpl.java:86)
at org.apache.tajo.client.TajoClientImpl.<init>(TajoClientImpl.java:82)
at
org.apache.tajo.ha.TestHAServiceHDFSImpl.verifyDataBaseAndTable(TestHAServiceHDFSImpl.java:152)
at
org.apache.tajo.ha.TestHAServiceHDFSImpl.testAutoFailOver(TestHAServiceHDFSImpl.java:82)
Running org.apache.tajo.parser.sql.TestSQLAnalyzer
2015-09-08 10:16:24,997 WARN: org.apache.hadoop.hdfs.DFSClient
(closeResponder(612)) - Caught exception
java.lang.InterruptedException
at java.lang.Object.wait(Native Method)
at java.lang.Thread.join(Thread.java:1260)
at java.lang.Thread.join(Thread.java:1334)
at
org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.closeResponder(DFSOutputStream.java:610)
at
org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.closeInternal(DFSOutputStream.java:578)
at
org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:574)
2015-09-08 10:16:25,000 ERROR: org.apache.hadoop.hdfs.server.datanode.DataNode
(run(278)) - 127.0.0.1:45058:DataXceiver error processing WRITE_BLOCK operation
src: /127.0.0.1:60039 dst: /127.0.0.1:45058
java.io.IOException: Premature EOF from inputStream
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:201)
at
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:213)
at
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:134)
at
org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:109)
at
org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:472)
at
org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:849)
at
org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:804)
at
org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:137)
at
org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:74)
at
org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:251)
at java.lang.Thread.run(Thread.java:724)
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.267 sec - in
org.apache.tajo.parser.sql.TestSQLAnalyzer
2015-09-08 10:16:26,346 INFO: org.apache.tajo.master.TajoMaster (run(540)) -
============================================
2015-09-08 10:16:26,347 INFO: org.apache.tajo.master.TajoMaster (run(541)) -
TajoMaster received SIGINT Signal
2015-09-08 10:16:26,347 INFO: org.apache.tajo.master.TajoMaster (run(542)) -
============================================
2015-09-08 10:16:26,347 INFO: org.apache.tajo.rpc.NettyServerBase
(shutdown(173)) - Rpc (Tajo-REST) listened on 0:0:0:0:0:0:0:0:35087) shutdown
2015-09-08 10:16:26,348 INFO: org.apache.tajo.ws.rs.TajoRestService
(serviceStop(129)) - Tajo Rest Service stopped.
2015-09-08 10:16:26,353 INFO: org.apache.tajo.util.history.HistoryWriter
(run(275)) - HistoryWriter_127.0.0.1_35086 stopped.
2015-09-08 10:16:26,355 INFO: org.apache.tajo.session.SessionManager
(removeSession(86)) - Session deba0e83-af28-4f12-bbfe-3bf44b54d368 is removed.
2015-09-08 10:16:26,356 INFO: org.mortbay.log (info(67)) - Shutdown hook
executing
2015-09-08 10:16:26,356 INFO: org.mortbay.log (info(67)) - Shutdown hook
complete
2015-09-08 10:16:26,355 INFO: org.apache.tajo.worker.TajoWorker (run(570)) -
============================================
2015-09-08 10:16:26,357 INFO: org.apache.tajo.worker.TajoWorker (run(571)) -
TajoWorker received SIGINT Signal
2015-09-08 10:16:26,358 INFO: org.apache.tajo.worker.TajoWorker (run(572)) -
============================================
2015-09-08 10:16:26,360 INFO: org.apache.tajo.session.SessionManager
(removeSession(86)) - Session ad33bd2d-8524-4610-8a78-24cf3ee27d3f is removed.
org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized will read a total of 2 records.
Sep 8, 2015 10:00:44 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next
block
Sep 8, 2015 10:00:44 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in
1 ms. row count = 2
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore
to file. allocated memory: 26
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for
[l_orderkey] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings:
[BIT_PACKED, PLAIN, RLE]
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 65B for
[l_shipdate_function] BINARY: 1 values, 20B raw, 20B comp, 1 pages, encodings:
[BIT_PACKED, PLAIN, RLE]
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore
to file. allocated memory: 26
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for
[l_orderkey] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings:
[BIT_PACKED, PLAIN, RLE]
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 65B for
[l_shipdate_function] BINARY: 1 values, 20B raw, 20B comp, 1 pages, encodings:
[BIT_PACKED, PLAIN, RLE]
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore
to file. allocated memory: 26
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for
[l_orderkey] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings:
[BIT_PACKED, PLAIN, RLE]
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 65B for
[l_shipdate_function] BINARY: 1 values, 20B raw, 20B comp, 1 pages, encodings:
[BIT_PACKED, PLAIN, RLE]
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore
to file. allocated memory: 26
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for
[l_orderkey] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings:
[BIT_PACKED, PLAIN, RLE]
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 65B for
[l_shipdate_function] BINARY: 1 values, 20B raw, 20B comp, 1 pages, encodings:
[BIT_PACKED, PLAIN, RLE]
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore
to file. allocated memory: 26
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for
[l_orderkey] INT32: 1 values, 10B raw, 10B comp, 1 pages, encodings:
[BIT_PACKED, PLAIN, RLE]
Sep 8, 2015 10:01:02 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 65B for
[l_shipdate_function] BINARY: 1 values, 20B raw, 20B comp, 1 pages, encodings:
[BIT_PACKED, PLAIN, RLE]
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
Initiating action with parallelism: 5
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
Initiating action with parallelism: 5
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
Initiating action with parallelism: 5
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
reading another 1 footers
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
Initiating action with parallelism: 5
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
reading another 1 footers
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
reading another 1 footers
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
Initiating action with parallelism: 5
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
Initiating action with parallelism: 5
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized
will read a total of 1 records.
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next
block
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized
will read a total of 1 records.
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next
block
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized
will read a total of 1 records.
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next
block
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in
1 ms. row count = 1
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in
1 ms. row count = 1
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in
1 ms. row count = 1
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
Initiating action with parallelism: 5
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
reading another 1 footers
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
Initiating action with parallelism: 5
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized
will read a total of 1 records.
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next
block
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in
1 ms. row count = 1
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
Initiating action with parallelism: 5
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
reading another 1 footers
Sep 8, 2015 10:01:04 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
Initiating action with parallelism: 5
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized
will read a total of 1 records.
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next
block
Sep 8, 2015 10:01:04 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in
1 ms. row count = 1
Sep 8, 2015 10:01:06 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordWriter: Flushing mem columnStore
to file. allocated memory: 212
Sep 8, 2015 10:01:06 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 43B for
[l_orderkey] INT32: 5 values, 10B raw, 10B comp, 1 pages, encodings:
[PLAIN_DICTIONARY, BIT_PACKED, RLE], dic { 3 entries, 12B raw, 3B comp}
Sep 8, 2015 10:01:06 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 123B for
[l_shipdate] BINARY: 5 values, 76B raw, 76B comp, 1 pages, encodings:
[BIT_PACKED, PLAIN, RLE]
Sep 8, 2015 10:01:06 AM INFO:
org.apache.parquet.hadoop.ColumnChunkPageWriteStore: written 123B for
[l_shipdate_function] BINARY: 5 values, 76B raw, 76B comp, 1 pages, encodings:
[BIT_PACKED, PLAIN, RLE]
Sep 8, 2015 10:01:07 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
Initiating action with parallelism: 5
Sep 8, 2015 10:01:07 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
reading another 1 footers
Sep 8, 2015 10:01:07 AM INFO: org.apache.parquet.hadoop.ParquetFileReader:
Initiating action with parallelism: 5
Sep 8, 2015 10:01:07 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: RecordReader initialized
will read a total of 5 records.
Sep 8, 2015 10:01:07 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: at row 0. reading next
block
Sep 8, 2015 10:01:07 AM INFO:
org.apache.parquet.hadoop.InternalParquetRecordReader: block read in memory in
1 ms. row count = 5
2015-09-08 10:16:26,361 INFO: BlockStateChange (logAddStoredBlock(2624)) -
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:45058 is added to
blk_1073741857_1033{UCState=COMMITTED, truncateBlock=null, primaryNodeIndex=-1,
replicas=[ReplicaUC[[DISK]DS-729bc829-046f-422e-9e20-c7bc8ee207e9:NORMAL:127.0.0.1:45058|RBW]]}
size 3365984
2015-09-08 10:16:26,361 INFO: org.apache.tajo.util.history.HistoryWriter
(run(275)) - HistoryWriter_asf909.gq1.ygridcore.net_35088 stopped.
2015-09-08 10:16:26,364 INFO: BlockStateChange
(processAndHandleReportedBlock(3171)) - BLOCK* addBlock: block
blk_1073748726_7902 on node 127.0.0.1:45058 size 134217728 does not belong to
any file
2015-09-08 10:16:26,361 INFO: org.apache.tajo.util.history.HistoryCleaner
(run(136)) - History cleaner stopped
2015-09-08 10:16:26,364 INFO: BlockStateChange (add(115)) - BLOCK*
InvalidateBlocks: add blk_1073748726_7902 to 127.0.0.1:45058
2015-09-08 10:16:26,429 INFO: org.apache.tajo.worker.NodeStatusUpdater
(serviceStop(111)) - NodeStatusUpdater stopped.
2015-09-08 10:16:26,429 INFO: org.apache.tajo.worker.NodeStatusUpdater
(run(262)) - Heartbeat Thread stopped.
2015-09-08 10:16:26,431 INFO: org.apache.tajo.rpc.NettyServerBase
(shutdown(173)) - Rpc (QueryMasterProtocol) listened on 0:0:0:0:0:0:0:0:35090)
shutdown
2015-09-08 10:16:26,431 INFO:
org.apache.tajo.querymaster.QueryMasterManagerService (serviceStop(106)) -
QueryMasterManagerService stopped
2015-09-08 10:16:26,432 INFO: org.apache.tajo.querymaster.QueryMaster
(run(425)) - QueryMaster heartbeat thread stopped
2015-09-08 10:16:26,432 INFO: org.apache.tajo.querymaster.QueryMaster
(serviceStop(161)) - QueryMaster stopped
2015-09-08 10:16:26,432 INFO: org.apache.tajo.worker.TajoWorkerClientService
(stop(98)) - TajoWorkerClientService stopping
2015-09-08 10:16:26,434 INFO: org.apache.tajo.rpc.NettyServerBase
(shutdown(173)) - Rpc (QueryMasterClientProtocol) listened on
0:0:0:0:0:0:0:0:35089) shutdown
2015-09-08 10:16:26,434 INFO: org.apache.tajo.worker.TajoWorkerClientService
(stop(102)) - TajoWorkerClientService stopped
2015-09-08 10:16:26,434 INFO: org.apache.tajo.rpc.NettyServerBase
(shutdown(173)) - Rpc (TajoWorkerProtocol) listened on 0:0:0:0:0:0:0:0:35088)
shutdown
2015-09-08 10:16:26,434 INFO: org.apache.tajo.worker.TajoWorkerManagerService
(serviceStop(93)) - TajoWorkerManagerService stopped
2015-09-08 10:16:26,435 INFO: org.apache.tajo.worker.TajoWorker
(serviceStop(380)) - TajoWorker main thread exiting
2015-09-08 10:16:26,763 INFO: org.apache.tajo.util.history.HistoryCleaner
(run(136)) - History cleaner stopped
2015-09-08 10:16:26,763 INFO: org.apache.tajo.rpc.NettyServerBase
(shutdown(173)) - Rpc (QueryCoordinatorProtocol) listened on 127.0.0.1:35086)
shutdown
2015-09-08 10:16:26,765 INFO: org.apache.tajo.rpc.NettyServerBase
(shutdown(173)) - Rpc (TajoMasterClientProtocol) listened on 127.0.0.1:35085)
shutdown
2015-09-08 10:16:26,765 INFO: org.apache.tajo.catalog.CatalogServer
(serviceStop(182)) - Catalog Server (127.0.0.1:35084) shutdown
2015-09-08 10:16:26,765 INFO: org.apache.tajo.rpc.NettyServerBase
(shutdown(173)) - Rpc (CatalogProtocol) listened on 127.0.0.1:35084) shutdown
2015-09-08 10:16:26,766 INFO: org.apache.tajo.catalog.store.DerbyStore
(close(2783)) - Close database
(jdbc:derby:memory:<https://builds.apache.org/job/Tajo-master-nightly/ws/tajo-core-tests/target/test-data/d238f3f2-132f-406f-8142-0537b1ac1c3a/db;create=true>)
2015-09-08 10:16:26,766 INFO: org.apache.tajo.rpc.NettyServerBase
(shutdown(173)) - Rpc (TajoResourceTrackerProtocol) listened on
127.0.0.1:35083) shutdown
2015-09-08 10:16:26,767 INFO: org.apache.tajo.master.TajoMaster
(serviceStop(408)) - Tajo Master main thread exiting
2015-09-08 10:16:27,003 INFO: org.apache.tajo.catalog.store.DerbyStore
(shutdown(68)) - Derby shutdown complete normally.
2015-09-08 10:16:27,003 INFO: org.apache.tajo.catalog.store.DerbyStore
(shutdown(75)) - Shutdown database
2015-09-08 10:16:27,126 INFO: BlockStateChange (invalidateWorkForOneNode(3488))
- BLOCK* BlockManager: ask 127.0.0.1:45058 to delete [blk_1073748725_7901,
blk_1073748726_7902]
Results :
Tests in error:
TestHAServiceHDFSImpl.testAutoFailOver:82->verifyDataBaseAndTable:152 »
TajoRuntime
Tests run: 1631, Failures: 0, Errors: 1, Skipped: 0
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO]
[INFO] Tajo Main ......................................... SUCCESS [ 1.665 s]
[INFO] Tajo Project POM .................................. SUCCESS [ 1.337 s]
[INFO] Tajo Maven Plugins ................................ SUCCESS [ 2.662 s]
[INFO] Tajo Common ....................................... SUCCESS [ 38.865 s]
[INFO] Tajo Algebra ...................................... SUCCESS [ 2.424 s]
[INFO] Tajo Catalog Common ............................... SUCCESS [ 5.109 s]
[INFO] Tajo Plan ......................................... SUCCESS [ 6.127 s]
[INFO] Tajo Rpc Common ................................... SUCCESS [ 1.441 s]
[INFO] Tajo Protocol Buffer Rpc .......................... SUCCESS [ 48.771 s]
[INFO] Tajo Catalog Client ............................... SUCCESS [ 1.336 s]
[INFO] Tajo Catalog Server ............................... SUCCESS [ 57.626 s]
[INFO] Tajo Storage Common ............................... SUCCESS [ 2.976 s]
[INFO] Tajo HDFS Storage ................................. SUCCESS [01:02 min]
[INFO] Tajo PullServer ................................... SUCCESS [ 1.091 s]
[INFO] Tajo Client ....................................... SUCCESS [ 2.387 s]
[INFO] Tajo CLI tools .................................... SUCCESS [ 1.806 s]
[INFO] Tajo SQL Parser ................................... SUCCESS [ 5.043 s]
[INFO] ASM (thirdparty) .................................. SUCCESS [ 1.736 s]
[INFO] Tajo RESTful Container ............................ SUCCESS [ 3.930 s]
[INFO] Tajo Metrics ...................................... SUCCESS [ 1.667 s]
[INFO] Tajo Core ......................................... SUCCESS [ 6.191 s]
[INFO] Tajo RPC .......................................... SUCCESS [ 0.954 s]
[INFO] Tajo Catalog Drivers Hive ......................... SUCCESS [ 30.750 s]
[INFO] Tajo Catalog Drivers .............................. SUCCESS [ 0.085 s]
[INFO] Tajo Catalog ...................................... SUCCESS [ 1.086 s]
[INFO] Tajo Client Example ............................... SUCCESS [ 1.062 s]
[INFO] Tajo HBase Storage ................................ SUCCESS [ 4.006 s]
[INFO] Tajo Cluster Tests ................................ SUCCESS [ 2.433 s]
[INFO] Tajo JDBC Driver .................................. SUCCESS [ 35.288 s]
[INFO] Tajo Storage ...................................... SUCCESS [ 0.998 s]
[INFO] Tajo Distribution ................................. SUCCESS [ 5.572 s]
[INFO] Tajo Core Tests ................................... FAILURE [22:57 min]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 28:36 min
[INFO] Finished at: 2015-09-08T10:16:27+00:00
[INFO] Final Memory: 77M/460M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal
org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on
project tajo-core-tests: There are test failures.
[ERROR]
[ERROR] Please refer to
<https://builds.apache.org/job/Tajo-master-nightly/ws/tajo-core-tests/target/surefire-reports>
for the individual test results.
[ERROR] -> [Help 1]
[ERROR]
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e
switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR]
[ERROR] For more information about the errors and possible solutions, please
read the following articles:
[ERROR] [Help 1]
http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR]
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR] mvn <goals> -rf :tajo-core-tests
Build step 'Execute shell' marked build as failure
Archiving artifacts
Sending artifact delta relative to Tajo-master-nightly #825
Archived 1 artifacts
Archive block size is 32768
Received 0 blocks and 59328738 bytes
Compression is 0.0%
Took 14 sec
Recording test results