See <https://builds.apache.org/job/beam_PostCommit_Python_Verify/4904/display/redirect>
------------------------------------------
[...truncated 1.27 MB...]
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 0
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension = 30000
namenode_1 | 18/05/09 03:37:34 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.window.num.buckets = 10
namenode_1 | 18/05/09 03:37:34 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.num.users = 10
namenode_1 | 18/05/09 03:37:34 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSNamesystem: Retry cache on namenode is enabled
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
namenode_1 | 18/05/09 03:37:34 INFO util.GSet: Computing capacity for map NameNodeRetryCache
namenode_1 | 18/05/09 03:37:34 INFO util.GSet: VM type = 64-bit
namenode_1 | 18/05/09 03:37:34 INFO util.GSet: 0.029999999329447746% max memory 958.5 MB = 294.5 KB
namenode_1 | 18/05/09 03:37:34 INFO util.GSet: capacity = 2^15 = 32768 entries
namenode_1 | 18/05/09 03:37:34 INFO common.Storage: Lock on /hadoop/dfs/name/in_use.lock acquired by nodename 143@namenode
namenode_1 | 18/05/09 03:37:34 INFO namenode.FileJournalManager: Recovering unfinalized segments in /hadoop/dfs/name/current
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSImage: No edit log streams selected.
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSImage: Planning to load image: FSImageFile(file=/hadoop/dfs/name/current/fsimage_0000000000000000000, cpktTxId=0000000000000000000)
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSImageFormatPBINode: Loading 1 INodes.
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSImageFormatProtobuf: Loaded FSImage in 0 seconds.
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSImage: Loaded image for txid 0 from /hadoop/dfs/name/current/fsimage_0000000000000000000
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSNamesystem: Need to save fs image? false (staleImage=false, haEnabled=false, isRollingUpgrade=false)
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSEditLog: Starting log segment at 1
datanode_1 | 18/05/09 03:37:34 INFO ipc.Client: Retrying connect to server: namenode/172.18.0.2:8020. Already tried 0 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS)
namenode_1 | 18/05/09 03:37:34 INFO namenode.NameCache: initialized with 0 entries 0 lookups
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSNamesystem: Finished loading FSImage in 301 msecs
namenode_1 | 18/05/09 03:37:34 INFO namenode.NameNode: RPC server is binding to 0.0.0.0:8020
namenode_1 | 18/05/09 03:37:34 INFO ipc.CallQueueManager: Using callQueue: class java.util.concurrent.LinkedBlockingQueue queueCapacity: 1000 scheduler: class org.apache.hadoop.ipc.DefaultRpcScheduler
namenode_1 | 18/05/09 03:37:34 INFO ipc.Server: Starting Socket Reader #1 for port 8020
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSNamesystem: Registered FSNamesystemState MBean
namenode_1 | 18/05/09 03:37:34 INFO namenode.LeaseManager: Number of blocks under construction: 0
namenode_1 | 18/05/09 03:37:34 INFO blockmanagement.BlockManager: initializing replication queues
namenode_1 | 18/05/09 03:37:34 INFO hdfs.StateChange: STATE* Leaving safe mode after 0 secs
namenode_1 | 18/05/09 03:37:34 INFO hdfs.StateChange: STATE* Network topology has 0 racks and 0 datanodes
namenode_1 | 18/05/09 03:37:34 INFO hdfs.StateChange: STATE* UnderReplicatedBlocks has 0 blocks
namenode_1 | 18/05/09 03:37:34 INFO blockmanagement.BlockManager: Total number of blocks = 0
namenode_1 | 18/05/09 03:37:34 INFO blockmanagement.BlockManager: Number of invalid blocks = 0
namenode_1 | 18/05/09 03:37:34 INFO blockmanagement.BlockManager: Number of under-replicated blocks = 0
namenode_1 | 18/05/09 03:37:34 INFO blockmanagement.BlockManager: Number of over-replicated blocks = 0
namenode_1 | 18/05/09 03:37:34 INFO blockmanagement.BlockManager: Number of blocks being written = 0
namenode_1 | 18/05/09 03:37:34 INFO hdfs.StateChange: STATE* Replication Queue initialization scan for invalid, over- and under-replicated blocks completed in 11 msec
namenode_1 | 18/05/09 03:37:34 INFO ipc.Server: IPC Server Responder: starting
namenode_1 | 18/05/09 03:37:34 INFO ipc.Server: IPC Server listener on 8020: starting
namenode_1 | 18/05/09 03:37:34 INFO namenode.NameNode: NameNode RPC up at: namenode/172.18.0.2:8020
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSNamesystem: Starting services required for active state
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSDirectory: Initializing quota with 4 thread(s)
namenode_1 | 18/05/09 03:37:34 INFO namenode.FSDirectory: Quota initialization completed in 4 milliseconds
namenode_1 | name space=1
namenode_1 | storage space=0
namenode_1 | storage types=RAM_DISK=0, SSD=0, DISK=0, ARCHIVE=0
namenode_1 | 18/05/09 03:37:34 INFO blockmanagement.CacheReplicationMonitor: Starting CacheReplicationMonitor with interval 30000 milliseconds
datanode_1 | 18/05/09 03:37:35 INFO ipc.Client: Retrying connect to server: namenode/172.18.0.2:8020. Already tried 1 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS)
datanode_1 | 18/05/09 03:37:35 INFO datanode.DataNode: Acknowledging ACTIVE Namenode during handshakeBlock pool <registering> (Datanode Uuid unassigned) service to namenode/172.18.0.2:8020
datanode_1 | 18/05/09 03:37:35 INFO common.Storage: Using 1 threads to upgrade data directories (dfs.datanode.parallel.volumes.load.threads.num=1, dataDirs=1)
datanode_1 | 18/05/09 03:37:35 INFO common.Storage: Lock on /hadoop/dfs/data/in_use.lock acquired by nodename 83@datanode
datanode_1 | 18/05/09 03:37:35 INFO common.Storage: Storage directory /hadoop/dfs/data is not formatted for namespace 1999203236. Formatting...
datanode_1 | 18/05/09 03:37:35 INFO common.Storage: Generated new storageID DS-6c0fdbe7-4c77-458e-a5f1-a89a45c546ff for directory /hadoop/dfs/data
datanode_1 | 18/05/09 03:37:35 INFO common.Storage: Analyzing storage directories for bpid BP-892499668-172.18.0.2-1525837052284
datanode_1 | 18/05/09 03:37:35 INFO common.Storage: Locking is disabled for /hadoop/dfs/data/current/BP-892499668-172.18.0.2-1525837052284
datanode_1 | 18/05/09 03:37:35 INFO common.Storage: Block pool storage directory /hadoop/dfs/data/current/BP-892499668-172.18.0.2-1525837052284 is not formatted for BP-892499668-172.18.0.2-1525837052284. Formatting ...
datanode_1 | 18/05/09 03:37:35 INFO common.Storage: Formatting block pool BP-892499668-172.18.0.2-1525837052284 directory /hadoop/dfs/data/current/BP-892499668-172.18.0.2-1525837052284/current
datanode_1 | 18/05/09 03:37:35 INFO datanode.DataNode: Setting up storage: nsid=1999203236;bpid=BP-892499668-172.18.0.2-1525837052284;lv=-57;nsInfo=lv=-63;cid=CID-f92a0eb3-ce75-42b1-83a5-9ad6c0faa57c;nsid=1999203236;c=1525837052284;bpid=BP-892499668-172.18.0.2-1525837052284;dnuuid=null
datanode_1 | 18/05/09 03:37:35 INFO datanode.DataNode: Generated and persisted new Datanode UUID ea93857e-c0c6-494c-887b-ab0ab1e96b58
datanode_1 | 18/05/09 03:37:35 INFO impl.FsDatasetImpl: Added new volume: DS-6c0fdbe7-4c77-458e-a5f1-a89a45c546ff
datanode_1 | 18/05/09 03:37:35 INFO impl.FsDatasetImpl: Added volume - /hadoop/dfs/data/current, StorageType: DISK
datanode_1 | 18/05/09 03:37:35 INFO impl.FsDatasetImpl: Registered FSDatasetState MBean
datanode_1 | 18/05/09 03:37:35 INFO impl.FsDatasetImpl: Volume reference is released.
datanode_1 | 18/05/09 03:37:35 INFO impl.FsDatasetImpl: Adding block pool BP-892499668-172.18.0.2-1525837052284
datanode_1 | 18/05/09 03:37:35 INFO impl.FsDatasetImpl: Scanning block pool BP-892499668-172.18.0.2-1525837052284 on volume /hadoop/dfs/data/current...
datanode_1 | 18/05/09 03:37:35 INFO impl.FsDatasetImpl: Time taken to scan block pool BP-892499668-172.18.0.2-1525837052284 on /hadoop/dfs/data/current: 14ms
datanode_1 | 18/05/09 03:37:35 INFO impl.FsDatasetImpl: Total time to scan all replicas for block pool BP-892499668-172.18.0.2-1525837052284: 16ms
datanode_1 | 18/05/09 03:37:35 INFO impl.FsDatasetImpl: Adding replicas to map for block pool BP-892499668-172.18.0.2-1525837052284 on volume /hadoop/dfs/data/current...
datanode_1 | 18/05/09 03:37:35 INFO impl.BlockPoolSlice: Replica Cache file: /hadoop/dfs/data/current/BP-892499668-172.18.0.2-1525837052284/current/replicas doesn't exist
datanode_1 | 18/05/09 03:37:35 INFO impl.FsDatasetImpl: Time to add replicas to map for block pool BP-892499668-172.18.0.2-1525837052284 on volume /hadoop/dfs/data/current: 1ms
datanode_1 | 18/05/09 03:37:35 INFO impl.FsDatasetImpl: Total time to add all replicas to map: 1ms
datanode_1 | 18/05/09 03:37:35 INFO datanode.VolumeScanner: Now scanning bpid BP-892499668-172.18.0.2-1525837052284 on volume /hadoop/dfs/data
datanode_1 | 18/05/09 03:37:35 INFO datanode.VolumeScanner: VolumeScanner(/hadoop/dfs/data, DS-6c0fdbe7-4c77-458e-a5f1-a89a45c546ff): finished scanning block pool BP-892499668-172.18.0.2-1525837052284
datanode_1 | 18/05/09 03:37:35 INFO datanode.DirectoryScanner: Periodic Directory Tree Verification scan starting at 5/9/18 9:14 AM with interval of 21600000ms
datanode_1 | 18/05/09 03:37:35 INFO datanode.DataNode: Block pool BP-892499668-172.18.0.2-1525837052284 (Datanode Uuid ea93857e-c0c6-494c-887b-ab0ab1e96b58) service to namenode/172.18.0.2:8020 beginning handshake with NN
datanode_1 | 18/05/09 03:37:35 INFO datanode.VolumeScanner: VolumeScanner(/hadoop/dfs/data, DS-6c0fdbe7-4c77-458e-a5f1-a89a45c546ff): no suitable block pools found to scan. Waiting 1814399970 ms.
namenode_1 | 18/05/09 03:37:35 INFO hdfs.StateChange: BLOCK* registerDatanode: from DatanodeRegistration(172.18.0.3:50010, datanodeUuid=ea93857e-c0c6-494c-887b-ab0ab1e96b58, infoPort=50075, infoSecurePort=0, ipcPort=50020, storageInfo=lv=-57;cid=CID-f92a0eb3-ce75-42b1-83a5-9ad6c0faa57c;nsid=1999203236;c=1525837052284) storage ea93857e-c0c6-494c-887b-ab0ab1e96b58
namenode_1 | 18/05/09 03:37:35 INFO net.NetworkTopology: Adding a new node: /default-rack/172.18.0.3:50010
namenode_1 | 18/05/09 03:37:35 INFO blockmanagement.BlockReportLeaseManager: Registered DN ea93857e-c0c6-494c-887b-ab0ab1e96b58 (172.18.0.3:50010).
datanode_1 | 18/05/09 03:37:35 INFO datanode.DataNode: Block pool Block pool BP-892499668-172.18.0.2-1525837052284 (Datanode Uuid ea93857e-c0c6-494c-887b-ab0ab1e96b58) service to namenode/172.18.0.2:8020 successfully registered with NN
datanode_1 | 18/05/09 03:37:35 INFO datanode.DataNode: For namenode namenode/172.18.0.2:8020 using BLOCKREPORT_INTERVAL of 21600000msec CACHEREPORT_INTERVAL of 10000msec Initial delay: 0msec; heartBeatInterval=3000
namenode_1 | 18/05/09 03:37:35 INFO blockmanagement.DatanodeDescriptor: Adding new storage ID DS-6c0fdbe7-4c77-458e-a5f1-a89a45c546ff for DN 172.18.0.3:50010
namenode_1 | 18/05/09 03:37:35 INFO BlockStateChange: BLOCK* processReport 0x390ca072ee5486e1: Processing first storage report for DS-6c0fdbe7-4c77-458e-a5f1-a89a45c546ff from datanode ea93857e-c0c6-494c-887b-ab0ab1e96b58
namenode_1 | 18/05/09 03:37:35 INFO BlockStateChange: BLOCK* processReport 0x390ca072ee5486e1: from storage DS-6c0fdbe7-4c77-458e-a5f1-a89a45c546ff node DatanodeRegistration(172.18.0.3:50010, datanodeUuid=ea93857e-c0c6-494c-887b-ab0ab1e96b58, infoPort=50075, infoSecurePort=0, ipcPort=50020, storageInfo=lv=-57;cid=CID-f92a0eb3-ce75-42b1-83a5-9ad6c0faa57c;nsid=1999203236;c=1525837052284), blocks: 0, hasStaleStorage: false, processing time: 2 msecs, invalidatedBlocks: 0
datanode_1 | 18/05/09 03:37:35 INFO datanode.DataNode: Successfully sent block report 0x390ca072ee5486e1, containing 1 storage report(s), of which we sent 1. The reports had 0 total blocks and used 1 RPC(s). This took 4 msec to generate and 52 msecs for RPC and NN processing. Got back one command: FinalizeCommand/5.
datanode_1 | 18/05/09 03:37:35 INFO datanode.DataNode: Got finalize command for block pool BP-892499668-172.18.0.2-1525837052284
test_1 | INFO Instantiated configuration from '/app/sdks/python/apache_beam/io/hdfs_integration_test/hdfscli.cfg'.
test_1 | INFO Instantiated <InsecureClient(url='http://namenode:50070')>.
test_1 | INFO Uploading 'kinglear.txt' to '/'.
test_1 | DEBUG Resolved path '/' to '/'.
test_1 | INFO Listing '/'.
test_1 | DEBUG Resolved path '/' to '/'.
test_1 | DEBUG Resolved path '/' to '/'.
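[Editor's note: the test container above is driving WebHDFS through the HdfsCLI Python client ("hdfs" package), using the config at hdfscli.cfg and an InsecureClient pointed at the NameNode. A minimal sketch of that upload step, assuming the "hdfs" package and the hostnames shown in this log; it is not the test's exact code:

    # Illustrative only: mirrors the INFO lines above (InsecureClient, upload, listing '/').
    from hdfs import InsecureClient

    # The log shows an InsecureClient bound to the NameNode's WebHDFS endpoint.
    client = InsecureClient('http://namenode:50070', user='root')

    # "Uploading 'kinglear.txt' to '/'": copy the local input file into HDFS.
    client.upload('/kinglear.txt', 'kinglear.txt')

    # The client lists '/' first, which is the LISTSTATUS request seen below.
    print(client.list('/'))
]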
test_1 | DEBUG Starting new HTTP connection (1): namenode
namenode_1 | May 09, 2018 3:38:19 AM com.sun.jersey.api.core.PackagesResourceConfig init
namenode_1 | INFO: Scanning for root resource and provider classes in the packages:
namenode_1 | org.apache.hadoop.hdfs.server.namenode.web.resources
namenode_1 | org.apache.hadoop.hdfs.web.resources
namenode_1 | May 09, 2018 3:38:19 AM com.sun.jersey.api.core.ScanningResourceConfig logClasses
namenode_1 | INFO: Root resource classes found:
namenode_1 | class org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods
namenode_1 | May 09, 2018 3:38:19 AM com.sun.jersey.api.core.ScanningResourceConfig logClasses
namenode_1 | INFO: Provider classes found:
namenode_1 | class org.apache.hadoop.hdfs.web.resources.UserProvider
namenode_1 | class org.apache.hadoop.hdfs.web.resources.ExceptionHandler
namenode_1 | May 09, 2018 3:38:19 AM com.sun.jersey.server.impl.application.WebApplicationImpl _initiate
namenode_1 | INFO: Initiating Jersey application, version 'Jersey: 1.9 09/02/2011 11:17 AM'
namenode_1 | May 09, 2018 3:38:20 AM com.sun.jersey.spi.inject.Errors processErrorMessages
namenode_1 | WARNING: The following warnings have been detected with resource and/or provider classes:
namenode_1 | WARNING: A sub-resource method, public javax.ws.rs.core.Response org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.postRoot(org.apache.hadoop.security.UserGroupInformation,org.apache.hadoop.hdfs.web.resources.DelegationParam,org.apache.hadoop.hdfs.web.resources.UserParam,org.apache.hadoop.hdfs.web.resources.DoAsParam,org.apache.hadoop.hdfs.web.resources.PostOpParam,org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam,org.apache.hadoop.hdfs.web.resources.BufferSizeParam,org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam,org.apache.hadoop.hdfs.web.resources.NewLengthParam) throws java.io.IOException,java.lang.InterruptedException, with URI template, "/", is treated as a resource method
namenode_1 | WARNING: A sub-resource method, public javax.ws.rs.core.Response org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.deleteRoot(org.apache.hadoop.security.UserGroupInformation,org.apache.hadoop.hdfs.web.resources.DelegationParam,org.apache.hadoop.hdfs.web.resources.UserParam,org.apache.hadoop.hdfs.web.resources.DoAsParam,org.apache.hadoop.hdfs.web.resources.DeleteOpParam,org.apache.hadoop.hdfs.web.resources.RecursiveParam,org.apache.hadoop.hdfs.web.resources.SnapshotNameParam) throws java.io.IOException,java.lang.InterruptedException, with URI template, "/", is treated as a resource method
namenode_1 | WARNING: A sub-resource method, public javax.ws.rs.core.Response org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.putRoot(org.apache.hadoop.security.UserGroupInformation,org.apache.hadoop.hdfs.web.resources.DelegationParam,org.apache.hadoop.hdfs.web.resources.UserParam,org.apache.hadoop.hdfs.web.resources.DoAsParam,org.apache.hadoop.hdfs.web.resources.PutOpParam,org.apache.hadoop.hdfs.web.resources.DestinationParam,org.apache.hadoop.hdfs.web.resources.OwnerParam,org.apache.hadoop.hdfs.web.resources.GroupParam,org.apache.hadoop.hdfs.web.resources.PermissionParam,org.apache.hadoop.hdfs.web.resources.OverwriteParam,org.apache.hadoop.hdfs.web.resources.BufferSizeParam,org.apache.hadoop.hdfs.web.resources.ReplicationParam,org.apache.hadoop.hdfs.web.resources.BlockSizeParam,org.apache.hadoop.hdfs.web.resources.ModificationTimeParam,org.apache.hadoop.hdfs.web.resources.AccessTimeParam,org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam,org.apache.hadoop.hdfs.web.resources.CreateParentParam,org.apache.hadoop.hdfs.web.resources.TokenArgumentParam,org.apache.hadoop.hdfs.web.resources.AclPermissionParam,org.apache.hadoop.hdfs.web.resources.XAttrNameParam,org.apache.hadoop.hdfs.web.resources.XAttrValueParam,org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam,org.apache.hadoop.hdfs.web.resources.SnapshotNameParam,org.apache.hadoop.hdfs.web.resources.OldSnapshotNameParam,org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam,org.apache.hadoop.hdfs.web.resources.CreateFlagParam,org.apache.hadoop.hdfs.web.resources.StoragePolicyParam) throws java.io.IOException,java.lang.InterruptedException, with URI template, "/", is treated as a resource method
namenode_1 | WARNING: A sub-resource method, public javax.ws.rs.core.Response org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.getRoot(org.apache.hadoop.security.UserGroupInformation,org.apache.hadoop.hdfs.web.resources.DelegationParam,org.apache.hadoop.hdfs.web.resources.UserParam,org.apache.hadoop.hdfs.web.resources.DoAsParam,org.apache.hadoop.hdfs.web.resources.GetOpParam,org.apache.hadoop.hdfs.web.resources.OffsetParam,org.apache.hadoop.hdfs.web.resources.LengthParam,org.apache.hadoop.hdfs.web.resources.RenewerParam,org.apache.hadoop.hdfs.web.resources.BufferSizeParam,java.util.List,org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam,org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam,org.apache.hadoop.hdfs.web.resources.FsActionParam,org.apache.hadoop.hdfs.web.resources.TokenKindParam,org.apache.hadoop.hdfs.web.resources.TokenServiceParam) throws java.io.IOException,java.lang.InterruptedException, with URI template, "/", is treated as a resource method
test_1 | DEBUG http://namenode:50070 "GET /webhdfs/v1/?user.name=root&op=LISTSTATUS HTTP/1.1" 200 None
test_1 | DEBUG Uploading 1 files using 1 thread(s).
test_1 | DEBUG Uploading 'kinglear.txt' to '/kinglear.txt'.
test_1 | INFO Writing to '/kinglear.txt'.
test_1 | DEBUG Resolved path '/kinglear.txt' to '/kinglear.txt'.
[32mtest_1 |[0m DEBUG http://namenode:50070 "PUT /webhdfs/v1/kinglear.txt?user.name=root&overwrite=False&op=CREATE HTTP/1.1" 307 0 [32mtest_1 |[0m DEBUG Starting new HTTP connection (1): datanode [33mdatanode_1 |[0m 18/05/09 03:38:20 INFO datanode.webhdfs: 172.18.0.4 PUT /webhdfs/v1/kinglear.txt?op=CREATE&user.name=root&namenoderpcaddress=namenode:8020&createflag=&createparent=true&overwrite=false&user.name=root 201 [36mnamenode_1 |[0m 18/05/09 03:38:20 INFO hdfs.StateChange: BLOCK* allocate blk_1073741825_1001, replicas=172.18.0.3:50010 for /kinglear.txt [33mdatanode_1 |[0m 18/05/09 03:38:20 INFO datanode.DataNode: Receiving BP-892499668-172.18.0.2-1525837052284:blk_1073741825_1001 src: /172.18.0.3:56130 dest: /172.18.0.3:50010 [33mdatanode_1 |[0m 18/05/09 03:38:21 INFO DataNode.clienttrace: src: /172.18.0.3:56130, dest: /172.18.0.3:50010, bytes: 157283, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-296196038_67, offset: 0, srvID: ea93857e-c0c6-494c-887b-ab0ab1e96b58, blockid: BP-892499668-172.18.0.2-1525837052284:blk_1073741825_1001, duration: 11283719 [33mdatanode_1 |[0m 18/05/09 03:38:21 INFO datanode.DataNode: PacketResponder: BP-892499668-172.18.0.2-1525837052284:blk_1073741825_1001, type=LAST_IN_PIPELINE terminating [36mnamenode_1 |[0m 18/05/09 03:38:21 INFO namenode.FSNamesystem: BLOCK* blk_1073741825_1001 is COMMITTED but not COMPLETE(numNodes= 0 < minimum = 1) in file /kinglear.txt [36mnamenode_1 |[0m 18/05/09 03:38:21 INFO namenode.EditLogFileOutputStream: Nothing to flush [36mnamenode_1 |[0m 18/05/09 03:38:21 INFO hdfs.StateChange: DIR* completeFile: /kinglear.txt is closed by DFSClient_NONMAPREDUCE_-296196038_67 [32mtest_1 |[0m DEBUG Upload of 'kinglear.txt' to '/kinglear.txt' complete. [32mtest_1 |[0m INFO:root:Missing pipeline option (runner). Executing pipeline using the default runner: DirectRunner. 
test_1 | INFO:root:==================== <function annotate_downstream_side_inputs at 0x7fbf21cccc80> ====================
test_1 | INFO:root:==================== <function lift_combiners at 0x7fbf21cccf50> ====================
test_1 | INFO:root:==================== <function expand_gbk at 0x7fbf21cccb18> ====================
test_1 | INFO:root:==================== <function sink_flattens at 0x7fbf21ccc9b0> ====================
test_1 | INFO:root:==================== <function greedily_fuse at 0x7fbf21ccc488> ====================
test_1 | INFO:root:==================== <function sort_stages at 0x7fbf21ccc7d0> ====================
test_1 | INFO:root:Running (((ref_AppliedPTransform_write/Write/WriteImpl/DoOnce/Read_16)+(ref_AppliedPTransform_write/Write/WriteImpl/InitializeWrite_17))+(ref_PCollection_PCollection_9/Write))+(ref_PCollection_PCollection_10/Write)
test_1 | INFO:root:start <DataOutputOperation ref_PCollection_PCollection_10/Write >
test_1 | INFO:root:start <DataOutputOperation ref_PCollection_PCollection_9/Write >
test_1 | INFO:root:start <DoOperation write/Write/WriteImpl/InitializeWrite output_tags=['out']>
test_1 | INFO:root:start <ReadOperation write/Write/WriteImpl/DoOnce/Read source=SourceBundle(weight=1.0, source=<apache_beam.transforms.core._CreateSource object at 0x7fbf21c94cd0>, start_position=None, stop_position=None)>
test_1 | INFO:root:finish <ReadOperation write/Write/WriteImpl/DoOnce/Read source=SourceBundle(weight=1.0, source=<apache_beam.transforms.core._CreateSource object at 0x7fbf21c94cd0>, start_position=None, stop_position=None), receivers=[ConsumerSet[write/Write/WriteImpl/DoOnce/Read.out0, coder=WindowedValueCoder[FastPrimitivesCoder], len(consumers)=2]]>
test_1 | INFO:root:finish <DoOperation write/Write/WriteImpl/InitializeWrite output_tags=['out'], receivers=[ConsumerSet[write/Write/WriteImpl/InitializeWrite.out0, coder=WindowedValueCoder[FastPrimitivesCoder], len(consumers)=1]]>
test_1 | INFO:root:finish <DataOutputOperation ref_PCollection_PCollection_9/Write >
test_1 | INFO:root:finish <DataOutputOperation ref_PCollection_PCollection_10/Write >
test_1 | INFO:root:Running (ref_AppliedPTransform_read/Read_3)+((ref_AppliedPTransform_split_4)+((ref_AppliedPTransform_pair_with_one_5)+(group/Write)))
test_1 | INFO:root:start <DataOutputOperation group/Write >
test_1 | INFO:root:start <DoOperation pair_with_one output_tags=['out']>
test_1 | INFO:root:start <DoOperation split output_tags=['out']>
test_1 | INFO:root:start <ReadOperation read/Read source=SourceBundle(weight=1.0, source=<apache_beam.io.textio._TextSource object at 0x7fbf21ca38d0>, start_position=None, stop_position=None)>
datanode_1 | 18/05/09 03:38:22 INFO datanode.webhdfs: 172.18.0.4 GET /webhdfs/v1/kinglear.txt?op=OPEN&user.name=root&namenoderpcaddress=namenode:8020&length=157284&offset=0 200
test_1 | INFO:root:finish <ReadOperation read/Read source=SourceBundle(weight=1.0, source=<apache_beam.io.textio._TextSource object at 0x7fbf21ca38d0>, start_position=None, stop_position=None), receivers=[ConsumerSet[read/Read.out0, coder=WindowedValueCoder[FastPrimitivesCoder], len(consumers)=1]]>
test_1 | INFO:root:finish <DoOperation split output_tags=['out'], receivers=[ConsumerSet[split.out0, coder=WindowedValueCoder[StrUtf8Coder], len(consumers)=1]]>
test_1 | INFO:root:finish <DoOperation pair_with_one output_tags=['out'], receivers=[ConsumerSet[pair_with_one.out0, coder=WindowedValueCoder[TupleCoder[LengthPrefixCoder[StrUtf8Coder], VarIntCoder]], len(consumers)=1]]>
test_1 | INFO:root:finish <DataOutputOperation group/Write >
test_1 | INFO:root:Running (((group/Read)+((ref_AppliedPTransform_count_10)+(ref_AppliedPTransform_format_11)))+(ref_AppliedPTransform_write/Write/WriteImpl/WriteBundles_18))+((ref_AppliedPTransform_write/Write/WriteImpl/Pair_19)+((ref_AppliedPTransform_write/Write/WriteImpl/WindowInto(WindowIntoFn)_20)+(write/Write/WriteImpl/GroupByKey/Write)))
test_1 | INFO:root:start <DataOutputOperation write/Write/WriteImpl/GroupByKey/Write >
test_1 | INFO:root:start <DoOperation write/Write/WriteImpl/WindowInto(WindowIntoFn) output_tags=['out']>
test_1 | INFO:root:start <DoOperation write/Write/WriteImpl/Pair output_tags=['out']>
test_1 | INFO:root:start <DoOperation write/Write/WriteImpl/WriteBundles output_tags=['out']>
test_1 | INFO:root:start <DoOperation format output_tags=['out']>
test_1 | INFO:root:start <DoOperation count output_tags=['out']>
test_1 | INFO:root:start <DataInputOperation group/Read receivers=[ConsumerSet[group/Read.out0, coder=WindowedValueCoder[TupleCoder[LengthPrefixCoder[StrUtf8Coder], IterableCoder[VarIntCoder]]], len(consumers)=1]]>
test_1 | WARNING:root:Mime types are not supported. Got non-default mime_type: text/plain
datanode_1 | 18/05/09 03:38:24 INFO datanode.webhdfs: 172.18.0.4 PUT /webhdfs/v1/beam-temp-py-wordcount-integration-6c504a5e533a11e8823a0242ac120004/ac449561-e6d2-4fde-b06c-7e91f3114e11.py-wordcount-integration?op=CREATE&user.name=root&namenoderpcaddress=namenode:8020&createflag=&createparent=true&overwrite=false&user.name=root 201
test_1 | INFO:root:finish <DataInputOperation group/Read receivers=[ConsumerSet[group/Read.out0, coder=WindowedValueCoder[TupleCoder[LengthPrefixCoder[StrUtf8Coder], IterableCoder[VarIntCoder]]], len(consumers)=1]]>
test_1 | INFO:root:finish <DoOperation count output_tags=['out'], receivers=[ConsumerSet[count.out0, coder=WindowedValueCoder[TupleCoder[StrUtf8Coder, FastPrimitivesCoder]], len(consumers)=1]]>
test_1 | INFO:root:finish <DoOperation format output_tags=['out'], receivers=[ConsumerSet[format.out0, coder=WindowedValueCoder[FastPrimitivesCoder], len(consumers)=1]]>
test_1 | INFO:root:finish <DoOperation write/Write/WriteImpl/WriteBundles output_tags=['out'], receivers=[ConsumerSet[write/Write/WriteImpl/WriteBundles.out0, coder=WindowedValueCoder[FastPrimitivesCoder], len(consumers)=1]]>
namenode_1 | 18/05/09 03:38:24 INFO hdfs.StateChange: BLOCK* allocate blk_1073741826_1002, replicas=172.18.0.3:50010 for /beam-temp-py-wordcount-integration-6c504a5e533a11e8823a0242ac120004/ac449561-e6d2-4fde-b06c-7e91f3114e11.py-wordcount-integration
datanode_1 | 18/05/09 03:38:24 INFO datanode.DataNode: Receiving BP-892499668-172.18.0.2-1525837052284:blk_1073741826_1002 src: /172.18.0.3:56150 dest: /172.18.0.3:50010
datanode_1 | 18/05/09 03:38:24 INFO DataNode.clienttrace: src: /172.18.0.3:56150, dest: /172.18.0.3:50010, bytes: 48944, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_972292288_69, offset: 0, srvID: ea93857e-c0c6-494c-887b-ab0ab1e96b58, blockid: BP-892499668-172.18.0.2-1525837052284:blk_1073741826_1002, duration: 3520869
datanode_1 | 18/05/09 03:38:24 INFO datanode.DataNode: PacketResponder: BP-892499668-172.18.0.2-1525837052284:blk_1073741826_1002, type=LAST_IN_PIPELINE terminating
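[Editor's note: the fused stage names above (read, split, pair_with_one, group, count, format, write) correspond to the Python word-count integration pipeline running on the default DirectRunner against HDFS paths. A condensed sketch of an equivalent pipeline, assuming hdfs:// paths served by the cluster in this log; it is not the exact wordcount.py source:

    import re
    import apache_beam as beam

    # No runner option is given, so the DirectRunner is used, as the log notes.
    with beam.Pipeline() as p:
        (p
         | 'read' >> beam.io.ReadFromText('hdfs://kinglear.txt')
         | 'split' >> beam.FlatMap(lambda line: re.findall(r"[A-Za-z\']+", line))
         | 'pair_with_one' >> beam.Map(lambda word: (word, 1))
         | 'group' >> beam.GroupByKey()
         | 'count' >> beam.Map(lambda kv: (kv[0], sum(kv[1])))
         | 'format' >> beam.Map(lambda kv: '%s: %d' % kv)
         | 'write' >> beam.io.WriteToText('hdfs://py-wordcount-integration'))
]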
namenode_1 | 18/05/09 03:38:24 INFO hdfs.StateChange: DIR* completeFile: /beam-temp-py-wordcount-integration-6c504a5e533a11e8823a0242ac120004/ac449561-e6d2-4fde-b06c-7e91f3114e11.py-wordcount-integration is closed by DFSClient_NONMAPREDUCE_972292288_69
test_1 | INFO:root:finish <DoOperation write/Write/WriteImpl/Pair output_tags=['out'], receivers=[ConsumerSet[write/Write/WriteImpl/Pair.out0, coder=WindowedValueCoder[TupleCoder[FastPrimitivesCoder, FastPrimitivesCoder]], len(consumers)=1]]>
test_1 | INFO:root:finish <DoOperation write/Write/WriteImpl/WindowInto(WindowIntoFn) output_tags=['out'], receivers=[ConsumerSet[write/Write/WriteImpl/WindowInto(WindowIntoFn).out0, coder=WindowedValueCoder[TupleCoder[LengthPrefixCoder[FastPrimitivesCoder], LengthPrefixCoder[FastPrimitivesCoder]]], len(consumers)=1]]>
test_1 | INFO:root:finish <DataOutputOperation write/Write/WriteImpl/GroupByKey/Write >
test_1 | INFO:root:Running (write/Write/WriteImpl/GroupByKey/Read)+((ref_AppliedPTransform_write/Write/WriteImpl/Extract_25)+(ref_PCollection_PCollection_17/Write))
test_1 | INFO:root:start <DataOutputOperation ref_PCollection_PCollection_17/Write >
test_1 | INFO:root:start <DoOperation write/Write/WriteImpl/Extract output_tags=['out']>
test_1 | INFO:root:start <DataInputOperation write/Write/WriteImpl/GroupByKey/Read receivers=[ConsumerSet[write/Write/WriteImpl/GroupByKey/Read.out0, coder=WindowedValueCoder[TupleCoder[LengthPrefixCoder[FastPrimitivesCoder], IterableCoder[LengthPrefixCoder[FastPrimitivesCoder]]]], len(consumers)=1]]>
test_1 | INFO:root:finish <DataInputOperation write/Write/WriteImpl/GroupByKey/Read receivers=[ConsumerSet[write/Write/WriteImpl/GroupByKey/Read.out0, coder=WindowedValueCoder[TupleCoder[LengthPrefixCoder[FastPrimitivesCoder], IterableCoder[LengthPrefixCoder[FastPrimitivesCoder]]]], len(consumers)=1]]>
test_1 | INFO:root:finish <DoOperation write/Write/WriteImpl/Extract output_tags=['out'], receivers=[ConsumerSet[write/Write/WriteImpl/Extract.out0, coder=WindowedValueCoder[FastPrimitivesCoder], len(consumers)=1]]>
test_1 | INFO:root:finish <DataOutputOperation ref_PCollection_PCollection_17/Write >
test_1 | INFO:root:Running ((ref_PCollection_PCollection_9/Read)+(ref_AppliedPTransform_write/Write/WriteImpl/PreFinalize_26))+(ref_PCollection_PCollection_18/Write)
test_1 | INFO:root:start <DataOutputOperation ref_PCollection_PCollection_18/Write >
test_1 | INFO:root:start <DoOperation write/Write/WriteImpl/PreFinalize output_tags=['out']>
test_1 | INFO:root:start <DataInputOperation ref_PCollection_PCollection_9/Read receivers=[ConsumerSet[ref_PCollection_PCollection_9/Read.out0, coder=WindowedValueCoder[FastPrimitivesCoder], len(consumers)=1]]>
test_1 | INFO:root:finish <DataInputOperation ref_PCollection_PCollection_9/Read receivers=[ConsumerSet[ref_PCollection_PCollection_9/Read.out0, coder=WindowedValueCoder[FastPrimitivesCoder], len(consumers)=1]]>
test_1 | INFO:root:finish <DoOperation write/Write/WriteImpl/PreFinalize output_tags=['out'], receivers=[ConsumerSet[write/Write/WriteImpl/PreFinalize.out0, coder=WindowedValueCoder[FastPrimitivesCoder], len(consumers)=1]]>
test_1 | INFO:root:finish <DataOutputOperation ref_PCollection_PCollection_18/Write >
test_1 | INFO:root:Running (ref_PCollection_PCollection_9/Read)+(ref_AppliedPTransform_write/Write/WriteImpl/FinalizeWrite_27)
test_1 | INFO:root:start <DoOperation write/Write/WriteImpl/FinalizeWrite output_tags=['out']>
test_1 | INFO:root:start <DataInputOperation ref_PCollection_PCollection_9/Read receivers=[ConsumerSet[ref_PCollection_PCollection_9/Read.out0, coder=WindowedValueCoder[FastPrimitivesCoder], len(consumers)=1]]>
test_1 | INFO:root:Starting finalize_write threads with num_shards: 1 (skipped: 0), batches: 1, num_threads: 1
test_1 | INFO:root:Renamed 1 shards in 0.14 seconds.
test_1 | INFO:root:finish <DataInputOperation ref_PCollection_PCollection_9/Read receivers=[ConsumerSet[ref_PCollection_PCollection_9/Read.out0, coder=WindowedValueCoder[FastPrimitivesCoder], len(consumers)=1]]>
test_1 | INFO:root:finish <DoOperation write/Write/WriteImpl/FinalizeWrite output_tags=['out'], receivers=[ConsumerSet[write/Write/WriteImpl/FinalizeWrite.out0, coder=WindowedValueCoder[FastPrimitivesCoder], len(consumers)=0]]>
test_1 | INFO:root:number of empty lines: 1663
test_1 | INFO:root:average word length: 4
hdfs_it-jenkins-beam_postcommit_python_verify-4904_test_1 exited with code 0
Stopping hdfs_it-jenkins-beam_postcommit_python_verify-4904_datanode_1 ...
Stopping hdfs_it-jenkins-beam_postcommit_python_verify-4904_namenode_1 ...
Stopping hdfs_it-jenkins-beam_postcommit_python_verify-4904_datanode_1 ... done
Stopping hdfs_it-jenkins-beam_postcommit_python_verify-4904_namenode_1 ... done
Aborting on container exit...

real	1m16.534s
user	0m0.805s
sys	0m0.159s

:pythonPostCommit (Thread[Task worker for ':' Thread 2,5,main]) completed. Took 38 mins 27.367 secs.

BUILD SUCCESSFUL in 38m 33s
1 actionable task: 1 executed

Publishing build scan...
https://gradle.com/s/e5dpneixhplsu

Build step 'Invoke Gradle script' changed build result to SUCCESS
[Set GitHub commit status (universal)] SUCCESS on repos [GHRepository@6fe960c5[description=Apache Beam,homepage=,name=beam,fork=false,size=58754,milestones={},language=Java,commits={},source=<null>,parent=<null>,responseHeaderFields={null=[HTTP/1.1 200 OK], Access-Control-Allow-Origin=[*], Access-Control-Expose-Headers=[ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval], Cache-Control=[private, max-age=60, s-maxage=60], Content-Encoding=[gzip], Content-Security-Policy=[default-src 'none'], Content-Type=[application/json; charset=utf-8], Date=[Wed, 09 May 2018 03:38:51 GMT], ETag=[W/"f271032ca1513bfd5e28400c832a5d21"], Last-Modified=[Wed, 09 May 2018 00:26:13 GMT], OkHttp-Received-Millis=[1525837131315], OkHttp-Response-Source=[NETWORK 200], OkHttp-Selected-Protocol=[http/1.1], OkHttp-Sent-Millis=[1525837131178], Referrer-Policy=[origin-when-cross-origin, strict-origin-when-cross-origin], Server=[GitHub.com], Status=[200 OK], Strict-Transport-Security=[max-age=31536000; includeSubdomains; preload], Transfer-Encoding=[chunked], Vary=[Accept, Authorization, Cookie, X-GitHub-OTP], X-Accepted-OAuth-Scopes=[repo], X-Content-Type-Options=[nosniff], X-Frame-Options=[deny], X-GitHub-Media-Type=[github.v3; format=json], X-GitHub-Request-Id=[D330:4F42:EBFACD:1DD7C12:5AF26D1B], X-OAuth-Scopes=[admin:repo_hook, repo, repo:status], X-RateLimit-Limit=[5000], X-RateLimit-Remaining=[4995], X-RateLimit-Reset=[1525840379], X-Runtime-rack=[0.049944], X-XSS-Protection=[1; mode=block]},url=https://api.github.com/repos/apache/beam,id=50904245]] (sha:60f90c8) with context:beam_PostCommit_Python_Verify
Setting commit status on GitHub for https://github.com/apache/beam/commit/60f90c8dcb229c35a82c7be15e64a89678bae058
ERROR: Build step failed with exception
java.io.FileNotFoundException: https://api.github.com/repos/apache/beam/statuses/60f90c8dcb229c35a82c7be15e64a89678bae058
	at com.squareup.okhttp.internal.huc.HttpURLConnectionImpl.getInputStream(HttpURLConnectionImpl.java:243)
	at com.squareup.okhttp.internal.huc.DelegatingHttpsURLConnection.getInputStream(DelegatingHttpsURLConnection.java:210)
	at com.squareup.okhttp.internal.huc.HttpsURLConnectionImpl.getInputStream(HttpsURLConnectionImpl.java:25)
	at org.kohsuke.github.Requester.parse(Requester.java:612)
	at org.kohsuke.github.Requester.parse(Requester.java:594)
	at org.kohsuke.github.Requester._to(Requester.java:272)
Caused: org.kohsuke.github.GHFileNotFoundException: {"message":"Not Found","documentation_url":"https://developer.github.com/v3/repos/statuses/#create-a-status"}
	at org.kohsuke.github.Requester.handleApiError(Requester.java:686)
	at org.kohsuke.github.Requester._to(Requester.java:293)
	at org.kohsuke.github.Requester.to(Requester.java:234)
	at org.kohsuke.github.GHRepository.createCommitStatus(GHRepository.java:1075)
	at org.jenkinsci.plugins.github.status.GitHubCommitStatusSetter.perform(GitHubCommitStatusSetter.java:160)
Caused: org.jenkinsci.plugins.github.common.CombineErrorHandler$ErrorHandlingException
	at org.jenkinsci.plugins.github.common.CombineErrorHandler.handle(CombineErrorHandler.java:74)
	at org.jenkinsci.plugins.github.status.GitHubCommitStatusSetter.perform(GitHubCommitStatusSetter.java:164)
	at com.cloudbees.jenkins.GitHubCommitNotifier.perform(GitHubCommitNotifier.java:151)
	at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
	at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
	at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
	at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
	at hudson.model.Build$BuildExecution.post2(Build.java:186)
	at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
	at hudson.model.Run.execute(Run.java:1749)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
	at hudson.model.ResourceController.execute(ResourceController.java:97)
	at hudson.model.Executor.run(Executor.java:429)
Build step 'Set build status on GitHub commit [deprecated]' marked build as failure
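[Editor's note: the step that failed is the Jenkins plugin's call to GitHub's create-commit-status endpoint (POST /repos/apache/beam/statuses/{sha}), which returned 404 Not Found; this often indicates the credential cannot see the repo or commit, or lacks the repo:status scope, though this log does not confirm the exact cause. For reference, a minimal sketch of the same request; the token and payload values are illustrative placeholders:

    import requests

    sha = '60f90c8dcb229c35a82c7be15e64a89678bae058'
    # Same endpoint the stack trace references via GHRepository.createCommitStatus.
    resp = requests.post(
        'https://api.github.com/repos/apache/beam/statuses/%s' % sha,
        headers={'Authorization': 'token <REDACTED>'},  # hypothetical credential
        json={'state': 'success',
              'context': 'beam_PostCommit_Python_Verify',
              'description': 'Build finished.'})
    print(resp.status_code, resp.json())
]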