Build: https://jenkins.thetaphi.de/job/Lucene-Solr-6.6-Linux/176/ Java: 32bit/jdk1.8.0_144 -server -XX:+UseG1GC
1 tests failed. FAILED: org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test Error Message: Expected 2 of 3 replicas to be active but only found 1; [core_node3:{"core":"c8n_1x3_lf_shard1_replica2","base_url":"http://127.0.0.1:36713","node_name":"127.0.0.1:36713_","state":"active","leader":"true"}]; clusterState: DocCollection(c8n_1x3_lf//clusterstate.json/27)={ "replicationFactor":"3", "shards":{"shard1":{ "range":"80000000-7fffffff", "state":"active", "replicas":{ "core_node1":{ "state":"down", "base_url":"http://127.0.0.1:44057", "core":"c8n_1x3_lf_shard1_replica1", "node_name":"127.0.0.1:44057_"}, "core_node2":{ "core":"c8n_1x3_lf_shard1_replica3", "base_url":"http://127.0.0.1:44247", "node_name":"127.0.0.1:44247_", "state":"down"}, "core_node3":{ "core":"c8n_1x3_lf_shard1_replica2", "base_url":"http://127.0.0.1:36713", "node_name":"127.0.0.1:36713_", "state":"active", "leader":"true"}}}}, "router":{"name":"compositeId"}, "maxShardsPerNode":"1", "autoAddReplicas":"false"} Stack Trace: java.lang.AssertionError: Expected 2 of 3 replicas to be active but only found 1; [core_node3:{"core":"c8n_1x3_lf_shard1_replica2","base_url":"http://127.0.0.1:36713","node_name":"127.0.0.1:36713_","state":"active","leader":"true"}]; clusterState: DocCollection(c8n_1x3_lf//clusterstate.json/27)={ "replicationFactor":"3", "shards":{"shard1":{ "range":"80000000-7fffffff", "state":"active", "replicas":{ "core_node1":{ "state":"down", "base_url":"http://127.0.0.1:44057", "core":"c8n_1x3_lf_shard1_replica1", "node_name":"127.0.0.1:44057_"}, "core_node2":{ "core":"c8n_1x3_lf_shard1_replica3", "base_url":"http://127.0.0.1:44247", "node_name":"127.0.0.1:44247_", "state":"down"}, "core_node3":{ "core":"c8n_1x3_lf_shard1_replica2", "base_url":"http://127.0.0.1:36713", "node_name":"127.0.0.1:36713_", "state":"active", "leader":"true"}}}}, "router":{"name":"compositeId"}, "maxShardsPerNode":"1", "autoAddReplicas":"false"} at 
__randomizedtesting.SeedInfo.seed([52A3B40E92477BDA:DAF78BD43CBB1622]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.assertTrue(Assert.java:43) at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:168) at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:55) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:992) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:967) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) Build Log: [...truncated 11795 lines...] [junit4] Suite: org.apache.solr.cloud.LeaderFailoverAfterPartitionTest [junit4] 2> Creating dataDir: /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/init-core-data-001 [junit4] 2> 693893 WARN (SUITE-LeaderFailoverAfterPartitionTest-seed#[52A3B40E92477BDA]-worker) [ ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=2 numCloses=2 [junit4] 2> 693894 INFO (SUITE-LeaderFailoverAfterPartitionTest-seed#[52A3B40E92477BDA]-worker) [ ] o.a.s.SolrTestCaseJ4 Using PointFields [junit4] 2> 693895 INFO (SUITE-LeaderFailoverAfterPartitionTest-seed#[52A3B40E92477BDA]-worker) [ ] o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: @org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776) [junit4] 2> 693895 INFO (SUITE-LeaderFailoverAfterPartitionTest-seed#[52A3B40E92477BDA]-worker) [ ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: / [junit4] 2> 693896 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER [junit4] 2> 693896 INFO (Thread-1424) [ ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0 [junit4] 2> 693896 INFO (Thread-1424) [ ] o.a.s.c.ZkTestServer Starting server [junit4] 2> 693900 ERROR (Thread-1424) [ ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not 
registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes [junit4] 2> 693996 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.ZkTestServer start zk server on port:38605 [junit4] 2> 694001 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml to /configs/conf1/solrconfig.xml [junit4] 2> 694001 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/schema.xml to /configs/conf1/schema.xml [junit4] 2> 694002 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml [junit4] 2> 694002 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt [junit4] 2> 694003 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt [junit4] 2> 694003 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml [junit4] 2> 694004 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] 
o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml [junit4] 2> 694004 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json [junit4] 2> 694005 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt [junit4] 2> 694005 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt [junit4] 2> 694006 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt [junit4] 2> 694066 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/control-001/cores/collection1 [junit4] 2> 694067 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.e.j.s.Server jetty-9.3.14.v20161028 [junit4] 2> 694068 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@9f5300{/,null,AVAILABLE} [junit4] 2> 694068 INFO 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@e9af4a{HTTP/1.1,[http/1.1]}{127.0.0.1:34785} [junit4] 2> 694068 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.e.j.s.Server Started @695896ms [junit4] 2> 694068 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/tempDir-001/control/data, hostContext=/, hostPort=39243, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/control-001/cores} [junit4] 2> 694068 ERROR (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. 
[junit4] 2> 694068 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 6.6.2 [junit4] 2> 694069 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 694069 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 694069 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2017-10-18T15:41:15.831Z [junit4] 2> 694071 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper) [junit4] 2> 694071 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/control-001/solr.xml [junit4] 2> 694094 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: socketTimeout=340000&connTimeout=45000&retry=true [junit4] 2> 694094 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:38605/solr [junit4] 2> 694103 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:39243_ ] o.a.s.c.Overseer Overseer (id=null) closing [junit4] 2> 694104 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:39243_ ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:39243_ [junit4] 2> 694104 INFO 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:39243_ ] o.a.s.c.Overseer Overseer (id=98850653841588228-127.0.0.1:39243_-n_0000000000) starting [junit4] 2> 694106 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:39243_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:39243_ [junit4] 2> 694107 INFO (zkCallback-846-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1) [junit4] 2> 694197 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:39243_ ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/control-001/cores [junit4] 2> 694197 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:39243_ ] o.a.s.c.CorePropertiesLocator Cores are: [collection1] [junit4] 2> 694197 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores [junit4] 2> 694198 INFO (OverseerStateUpdate-98850653841588228-127.0.0.1:39243_-n_0000000000) [n:127.0.0.1:39243_ ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard1 [junit4] 2> 695211 WARN (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.c.Config Beginning with Solr 5.5, <mergePolicy> is deprecated, use <mergePolicyFactory> instead. 
[junit4] 2> 695212 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 6.6.2 [junit4] 2> 695227 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test [junit4] 2> 695298 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id [junit4] 2> 695304 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection control_collection, trusted=true [junit4] 2> 695304 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 695304 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/control-001/cores/collection1], dataDir=[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/control-001/cores/collection1/data/] [junit4] 2> 695305 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. 
Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@77dc45 [junit4] 2> 695305 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=45, maxMergeAtOnceExplicit=48, maxMergedSegmentMB=68.57421875, floorSegmentMB=1.0830078125, forceMergeDeletesPctAllowed=21.822731206422706, segmentsPerTier=25.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.30505255139331733 [junit4] 2> 695308 WARN (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}} [junit4] 2> 695330 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog [junit4] 2> 695330 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 695331 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 695331 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 695331 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=44, maxMergeAtOnceExplicit=36, maxMergedSegmentMB=64.6962890625, floorSegmentMB=0.98828125, forceMergeDeletesPctAllowed=11.982498655858757, segmentsPerTier=30.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.23429108886169844 [junit4] 2> 695331 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@1690763[collection1] main] [junit4] 2> 695332 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 695332 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 2> 695332 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000 [junit4] 2> 695333 INFO (searcherExecutor-2495-thread-1-processing-n:127.0.0.1:39243_ x:collection1 c:control_collection) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@1690763[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 695333 INFO (coreLoadExecutor-2494-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ c:control_collection x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1581610462971166720 [junit4] 2> 695336 INFO (coreZkRegister-2487-thread-1-processing-n:127.0.0.1:39243_ x:collection1 c:control_collection) [n:127.0.0.1:39243_ 
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue. [junit4] 2> 695336 INFO (coreZkRegister-2487-thread-1-processing-n:127.0.0.1:39243_ x:collection1 c:control_collection) [n:127.0.0.1:39243_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync [junit4] 2> 695336 INFO (coreZkRegister-2487-thread-1-processing-n:127.0.0.1:39243_ x:collection1 c:control_collection) [n:127.0.0.1:39243_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:39243/collection1/ [junit4] 2> 695336 INFO (coreZkRegister-2487-thread-1-processing-n:127.0.0.1:39243_ x:collection1 c:control_collection) [n:127.0.0.1:39243_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me [junit4] 2> 695336 INFO (coreZkRegister-2487-thread-1-processing-n:127.0.0.1:39243_ x:collection1 c:control_collection) [n:127.0.0.1:39243_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:39243/collection1/ has no replicas [junit4] 2> 695336 INFO (coreZkRegister-2487-thread-1-processing-n:127.0.0.1:39243_ x:collection1 c:control_collection) [n:127.0.0.1:39243_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR [junit4] 2> 695337 INFO (coreZkRegister-2487-thread-1-processing-n:127.0.0.1:39243_ x:collection1 c:control_collection) [n:127.0.0.1:39243_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:39243/collection1/ shard1 [junit4] 2> 695343 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(0) -> (1) [junit4] 2> 695343 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:38605/solr ready [junit4] 2> 695343 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection loss:false [junit4] 2> 695400 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-1-001/cores/collection1 [junit4] 2> 695400 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-1-001 [junit4] 2> 695401 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.e.j.s.Server jetty-9.3.14.v20161028 [junit4] 2> 695401 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@13e930a{/,null,AVAILABLE} [junit4] 2> 695402 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@bcf6a5{HTTP/1.1,[http/1.1]}{127.0.0.1:37397} [junit4] 2> 695402 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.e.j.s.Server Started @697229ms [junit4] 2> 695402 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/tempDir-001/jetty1, 
solrconfig=solrconfig.xml, hostContext=/, hostPort=44057, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-1-001/cores} [junit4] 2> 695402 ERROR (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. [junit4] 2> 695402 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 6.6.2 [junit4] 2> 695402 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 695402 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 695402 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2017-10-18T15:41:17.164Z [junit4] 2> 695404 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper) [junit4] 2> 695404 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-1-001/solr.xml [junit4] 2> 695408 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: socketTimeout=340000&connTimeout=45000&retry=true [junit4] 2> 695409 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.ZkContainer Zookeeper 
client=127.0.0.1:38605/solr [junit4] 2> 695415 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:44057_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1) [junit4] 2> 695415 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:44057_ ] o.a.s.c.Overseer Overseer (id=null) closing [junit4] 2> 695416 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:44057_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:44057_ [junit4] 2> 695416 INFO (zkCallback-846-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2) [junit4] 2> 695416 INFO (zkCallback-855-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2) [junit4] 2> 695417 INFO (zkCallback-850-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(1) -> (2) [junit4] 2> 695488 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:44057_ ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-1-001/cores [junit4] 2> 695488 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:44057_ ] o.a.s.c.CorePropertiesLocator Cores are: [collection1] [junit4] 2> 695488 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores [junit4] 2> 695489 INFO (OverseerStateUpdate-98850653841588228-127.0.0.1:39243_-n_0000000000) [n:127.0.0.1:39243_ ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard2 [junit4] 2> 695638 INFO (coreZkRegister-2487-thread-1-processing-n:127.0.0.1:39243_ x:collection1 c:control_collection) [n:127.0.0.1:39243_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary [junit4] 2> 696504 WARN (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.c.Config Beginning with Solr 5.5, <mergePolicy> is deprecated, use <mergePolicyFactory> instead. 
[junit4] 2> 696505 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 6.6.2 [junit4] 2> 696521 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test [junit4] 2> 696590 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id [junit4] 2> 696596 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection collection1, trusted=true [junit4] 2> 696596 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 696596 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-1-001/cores/collection1], dataDir=[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-1-001/cores/collection1/data/] [junit4] 2> 696596 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. 
Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@77dc45 [junit4] 2> 696597 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=45, maxMergeAtOnceExplicit=48, maxMergedSegmentMB=68.57421875, floorSegmentMB=1.0830078125, forceMergeDeletesPctAllowed=21.822731206422706, segmentsPerTier=25.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.30505255139331733 [junit4] 2> 696600 WARN (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}} [junit4] 2> 696624 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog [junit4] 2> 696624 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 696624 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 696624 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 696625 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping 
class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=44, maxMergeAtOnceExplicit=36, maxMergedSegmentMB=64.6962890625, floorSegmentMB=0.98828125, forceMergeDeletesPctAllowed=11.982498655858757, segmentsPerTier=30.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.23429108886169844 [junit4] 2> 696625 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@f34151[collection1] main] [junit4] 2> 696626 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 696626 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 2> 696626 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000 [junit4] 2> 696627 INFO (searcherExecutor-2506-thread-1-processing-n:127.0.0.1:44057_ x:collection1 c:collection1) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@f34151[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 696627 INFO (coreLoadExecutor-2505-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ c:collection1 x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1581610464328024064 [junit4] 2> 696631 INFO (coreZkRegister-2500-thread-1-processing-n:127.0.0.1:44057_ x:collection1 c:collection1) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to 
continue. [junit4] 2> 696631 INFO (coreZkRegister-2500-thread-1-processing-n:127.0.0.1:44057_ x:collection1 c:collection1) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync [junit4] 2> 696631 INFO (coreZkRegister-2500-thread-1-processing-n:127.0.0.1:44057_ x:collection1 c:collection1) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:44057/collection1/ [junit4] 2> 696631 INFO (coreZkRegister-2500-thread-1-processing-n:127.0.0.1:44057_ x:collection1 c:collection1) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me [junit4] 2> 696631 INFO (coreZkRegister-2500-thread-1-processing-n:127.0.0.1:44057_ x:collection1 c:collection1) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:44057/collection1/ has no replicas [junit4] 2> 696631 INFO (coreZkRegister-2500-thread-1-processing-n:127.0.0.1:44057_ x:collection1 c:collection1) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR [junit4] 2> 696632 INFO (coreZkRegister-2500-thread-1-processing-n:127.0.0.1:44057_ x:collection1 c:collection1) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:44057/collection1/ shard2 [junit4] 2> 696697 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-2-001/cores/collection1 [junit4] 2> 696697 INFO 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-2-001 [junit4] 2> 696698 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028 [junit4] 2> 696699 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@1f13456{/,null,AVAILABLE} [junit4] 2> 696699 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@965b3{HTTP/1.1,[http/1.1]}{127.0.0.1:39745} [junit4] 2> 696699 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.e.j.s.Server Started @698527ms [junit4] 2> 696699 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/tempDir-001/jetty2, solrconfig=solrconfig.xml, hostContext=/, hostPort=44247, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-2-001/cores} [junit4] 2> 696699 ERROR (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. 
[junit4] 2> 696700 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 6.6.2 [junit4] 2> 696700 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 696700 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 696700 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2017-10-18T15:41:18.462Z [junit4] 2> 696701 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper) [junit4] 2> 696701 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-2-001/solr.xml [junit4] 2> 696705 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: socketTimeout=340000&connTimeout=45000&retry=true [junit4] 2> 696706 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:38605/solr [junit4] 2> 696710 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:44247_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(0) -> (2) [junit4] 2> 696711 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:44247_ ] o.a.s.c.Overseer Overseer (id=null) closing [junit4] 2> 696711 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:44247_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:44247_ [junit4] 2> 696712 INFO (zkCallback-850-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3) [junit4] 2> 696712 INFO (zkCallback-861-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3) [junit4] 2> 696712 INFO (zkCallback-846-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3) [junit4] 2> 696712 INFO (zkCallback-855-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(2) -> (3) [junit4] 2> 696768 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:44247_ ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-2-001/cores [junit4] 2> 696768 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:44247_ ] o.a.s.c.CorePropertiesLocator Cores are: [collection1] [junit4] 2> 696769 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores [junit4] 2> 696770 INFO (OverseerStateUpdate-98850653841588228-127.0.0.1:39243_-n_0000000000) [n:127.0.0.1:39243_ ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard1 [junit4] 2> 696883 INFO (coreZkRegister-2500-thread-1-processing-n:127.0.0.1:44057_ x:collection1 c:collection1) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary [junit4] 2> 697777 WARN (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.c.Config Beginning with Solr 5.5, <mergePolicy> is deprecated, use <mergePolicyFactory> instead. 
[junit4] 2> 697777 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 6.6.2 [junit4] 2> 697786 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test [junit4] 2> 697881 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id [junit4] 2> 697892 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection collection1, trusted=true [junit4] 2> 697893 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 697893 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-2-001/cores/collection1], dataDir=[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-2-001/cores/collection1/data/] [junit4] 2> 697893 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. 
Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@77dc45 [junit4] 2> 697894 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=45, maxMergeAtOnceExplicit=48, maxMergedSegmentMB=68.57421875, floorSegmentMB=1.0830078125, forceMergeDeletesPctAllowed=21.822731206422706, segmentsPerTier=25.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.30505255139331733 [junit4] 2> 697897 WARN (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}} [junit4] 2> 697926 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog [junit4] 2> 697926 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 697927 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 697927 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 697928 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping 
class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=44, maxMergeAtOnceExplicit=36, maxMergedSegmentMB=64.6962890625, floorSegmentMB=0.98828125, forceMergeDeletesPctAllowed=11.982498655858757, segmentsPerTier=30.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.23429108886169844 [junit4] 2> 697928 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@63405b[collection1] main] [junit4] 2> 697940 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 697940 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 2> 697940 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000 [junit4] 2> 697941 INFO (coreLoadExecutor-2516-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1581610465705852928 [junit4] 2> 697944 INFO (searcherExecutor-2517-thread-1-processing-n:127.0.0.1:44247_ x:collection1 c:collection1) [n:127.0.0.1:44247_ c:collection1 x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@63405b[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 697950 INFO (coreZkRegister-2511-thread-1-processing-n:127.0.0.1:44247_ x:collection1 c:collection1) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to 
continue. [junit4] 2> 697950 INFO (coreZkRegister-2511-thread-1-processing-n:127.0.0.1:44247_ x:collection1 c:collection1) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync [junit4] 2> 697950 INFO (coreZkRegister-2511-thread-1-processing-n:127.0.0.1:44247_ x:collection1 c:collection1) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:44247/collection1/ [junit4] 2> 697950 INFO (coreZkRegister-2511-thread-1-processing-n:127.0.0.1:44247_ x:collection1 c:collection1) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me [junit4] 2> 697950 INFO (coreZkRegister-2511-thread-1-processing-n:127.0.0.1:44247_ x:collection1 c:collection1) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:44247/collection1/ has no replicas [junit4] 2> 697950 INFO (coreZkRegister-2511-thread-1-processing-n:127.0.0.1:44247_ x:collection1 c:collection1) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR [junit4] 2> 697952 INFO (coreZkRegister-2511-thread-1-processing-n:127.0.0.1:44247_ x:collection1 c:collection1) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:44247/collection1/ shard1 [junit4] 2> 698030 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-3-001/cores/collection1 [junit4] 2> 698030 INFO 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-3-001 [junit4] 2> 698031 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028 [junit4] 2> 698032 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@b66d73{/,null,AVAILABLE} [junit4] 2> 698032 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@1f8c40a{HTTP/1.1,[http/1.1]}{127.0.0.1:43609} [junit4] 2> 698032 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.e.j.s.Server Started @699860ms [junit4] 2> 698033 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/tempDir-001/jetty3, solrconfig=solrconfig.xml, hostContext=/, hostPort=36713, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-3-001/cores} [junit4] 2> 698033 ERROR (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. 
[junit4] 2> 698033 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 6.6.2 [junit4] 2> 698033 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 698033 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 698033 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2017-10-18T15:41:19.795Z [junit4] 2> 698035 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper) [junit4] 2> 698035 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-3-001/solr.xml [junit4] 2> 698042 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: socketTimeout=340000&connTimeout=45000&retry=true [junit4] 2> 698044 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:38605/solr [junit4] 2> 698053 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:36713_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(0) -> (3) [junit4] 2> 698054 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:36713_ ] o.a.s.c.Overseer Overseer (id=null) closing [junit4] 2> 698056 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:36713_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:36713_ [junit4] 2> 698056 INFO (zkCallback-867-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 698057 INFO (zkCallback-850-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 698056 INFO (zkCallback-861-thread-1-processing-n:127.0.0.1:44247_) [n:127.0.0.1:44247_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 698056 INFO (zkCallback-846-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 698056 INFO (zkCallback-855-thread-1-processing-n:127.0.0.1:44057_) [n:127.0.0.1:44057_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(3) -> (4) [junit4] 2> 698102 INFO (coreZkRegister-2511-thread-1-processing-n:127.0.0.1:44247_ x:collection1 c:collection1) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary [junit4] 2> 698180 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:36713_ ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-3-001/cores [junit4] 2> 698180 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [n:127.0.0.1:36713_ ] o.a.s.c.CorePropertiesLocator Cores are: [collection1] [junit4] 2> 698180 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores [junit4] 2> 698184 INFO (OverseerStateUpdate-98850653841588228-127.0.0.1:39243_-n_0000000000) [n:127.0.0.1:39243_ ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard2 [junit4] 2> 699197 WARN (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.c.Config Beginning with Solr 5.5, <mergePolicy> is deprecated, use <mergePolicyFactory> instead. 
[junit4] 2> 699198 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 6.6.2 [junit4] 2> 699210 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test [junit4] 2> 699297 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id [junit4] 2> 699304 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection collection1, trusted=true [junit4] 2> 699305 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 699305 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-3-001/cores/collection1], dataDir=[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-3-001/cores/collection1/data/] [junit4] 2> 699305 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. 
Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@77dc45 [junit4] 2> 699307 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=45, maxMergeAtOnceExplicit=48, maxMergedSegmentMB=68.57421875, floorSegmentMB=1.0830078125, forceMergeDeletesPctAllowed=21.822731206422706, segmentsPerTier=25.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.30505255139331733 [junit4] 2> 699379 WARN (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}} [junit4] 2> 699415 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog [junit4] 2> 699415 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 699416 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 699416 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 699417 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping 
class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=44, maxMergeAtOnceExplicit=36, maxMergedSegmentMB=64.6962890625, floorSegmentMB=0.98828125, forceMergeDeletesPctAllowed=11.982498655858757, segmentsPerTier=30.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.23429108886169844 [junit4] 2> 699417 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@136e20c[collection1] main] [junit4] 2> 699418 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 699419 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 2> 699419 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000 [junit4] 2> 699421 INFO (searcherExecutor-2528-thread-1-processing-n:127.0.0.1:36713_ x:collection1 c:collection1) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@136e20c[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 699421 INFO (coreLoadExecutor-2527-thread-1-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1581610467257745408 [junit4] 2> 699423 INFO (coreZkRegister-2522-thread-1-processing-n:127.0.0.1:36713_ x:collection1 c:collection1) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.ZkController Core needs to recover:collection1 
[junit4] 2> 699424 INFO (updateExecutor-864-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DefaultSolrCoreState Running recovery [junit4] 2> 699424 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Starting recovery process. recoveringAfterStartup=true [junit4] 2> 699424 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy ###### startupVersions=[[]] [junit4] 2> 699424 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Begin buffering updates. core=[collection1] [junit4] 2> 699424 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.UpdateLog Starting to buffer updates. 
FSUpdateLog{state=ACTIVE, tlog=null} [junit4] 2> 699424 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Publishing state of core [collection1] as recovering, leader is [http://127.0.0.1:44057/collection1/] and I am [http://127.0.0.1:36713/collection1/] [junit4] 2> 699432 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Sending prep recovery command to [http://127.0.0.1:44057]; [WaitForState: action=PREPRECOVERY&core=collection1&nodeName=127.0.0.1:36713_&coreNodeName=core_node3&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true] [junit4] 2> 699435 INFO (SocketProxy-Acceptor-44057) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=50768,localport=44057], receiveBufferSize:531000 [junit4] 2> 699437 INFO (SocketProxy-Acceptor-44057) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=37397,localport=51210], receiveBufferSize=530904 [junit4] 2> 699440 INFO (qtp1358857-5463) [n:127.0.0.1:44057_ ] o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node3, state: recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true [junit4] 2> 699440 INFO (qtp1358857-5463) [n:127.0.0.1:44057_ ] o.a.s.h.a.PrepRecoveryOp Will wait a max of 183 seconds to see collection1 (shard2 of collection1) have state: recovering [junit4] 2> 699440 INFO (qtp1358857-5463) [n:127.0.0.1:44057_ ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? 
true, live=true, checkLive=true, currentState=down, localState=active, nodeName=127.0.0.1:36713_, coreNodeName=core_node3, onlyIfActiveCheckResult=false, nodeProps: core_node3:{"core":"collection1","base_url":"http://127.0.0.1:36713","node_name":"127.0.0.1:36713_","state":"down"} [junit4] 2> 699924 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.SolrTestCaseJ4 ###Starting test [junit4] 2> 699924 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractFullDistribZkTestBase Wait for recoveries to finish - wait 30000 for each attempt [junit4] 2> 699924 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: collection1 failOnTimeout:true timeout (sec):30000 [junit4] 2> 700440 INFO (qtp1358857-5463) [n:127.0.0.1:44057_ ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=recovering, localState=active, nodeName=127.0.0.1:36713_, coreNodeName=core_node3, onlyIfActiveCheckResult=false, nodeProps: core_node3:{"core":"collection1","base_url":"http://127.0.0.1:36713","node_name":"127.0.0.1:36713_","state":"recovering"} [junit4] 2> 700440 INFO (qtp1358857-5463) [n:127.0.0.1:44057_ ] o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node3, state: recovering, checkLive: true, onlyIfLeader: true for: 1 seconds. 
[junit4] 2> 700440 INFO (qtp1358857-5463) [n:127.0.0.1:44057_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={nodeName=127.0.0.1:36713_&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node3&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2} status=0 QTime=1000 [junit4] 2> 700941 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Attempting to PeerSync from [http://127.0.0.1:44057/collection1/] - recoveringAfterStartup=[true] [junit4] 2> 700941 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.PeerSync PeerSync: core=collection1 url=http://127.0.0.1:36713 START replicas=[http://127.0.0.1:44057/collection1/] nUpdates=100 [junit4] 2> 700942 INFO (SocketProxy-Acceptor-44057) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=50772,localport=44057], receiveBufferSize:531000 [junit4] 2> 700942 INFO (SocketProxy-Acceptor-44057) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=37397,localport=51214], receiveBufferSize=530904 [junit4] 2> 700944 INFO (qtp1358857-5463) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0} [junit4] 2> 700944 INFO (qtp1358857-5463) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request [collection1] webapp= path=/get params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2} status=0 QTime=0 [junit4] 2> 700945 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ 
x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0} [junit4] 2> 700945 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.PeerSync We are already in sync. No need to do a PeerSync [junit4] 2> 700945 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false} [junit4] 2> 700945 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit. [junit4] 2> 700945 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush [junit4] 2> 700945 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy PeerSync stage of recovery was successful. 
[junit4] 2> 700945 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Replaying updates buffered during PeerSync. [junit4] 2> 700945 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy No replay needed. [junit4] 2> 700945 INFO (recoveryExecutor-865-thread-1-processing-n:127.0.0.1:36713_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Registering as Active after recovery. [junit4] 2> 701924 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1 [junit4] 2> 701925 INFO (SocketProxy-Acceptor-39243) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=43072,localport=39243], receiveBufferSize:531000 [junit4] 2> 701925 INFO (SocketProxy-Acceptor-39243) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=34785,localport=56646], receiveBufferSize=530904 [junit4] 2> 701926 INFO (qtp32596230-5428) [n:127.0.0.1:39243_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false} [junit4] 2> 701926 INFO (qtp32596230-5428) [n:127.0.0.1:39243_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit. 
[junit4] 2> 701926 INFO (qtp32596230-5428) [n:127.0.0.1:39243_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush [junit4] 2> 701926 INFO (qtp32596230-5428) [n:127.0.0.1:39243_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp= path=/update params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=} 0 0 [junit4] 2> 701927 INFO (SocketProxy-Acceptor-44247) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=59420,localport=44247], receiveBufferSize:531000 [junit4] 2> 701927 INFO (SocketProxy-Acceptor-44247) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=39745,localport=54636], receiveBufferSize=530904 [junit4] 2> 701930 INFO (SocketProxy-Acceptor-44057) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=50784,localport=44057], receiveBufferSize:531000 [junit4] 2> 701931 INFO (SocketProxy-Acceptor-44247) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=59424,localport=44247], receiveBufferSize:531000 [junit4] 2> 701931 INFO (SocketProxy-Acceptor-36713) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=45886,localport=36713], receiveBufferSize:531000 [junit4] 2> 701931 INFO (SocketProxy-Acceptor-44057) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=37397,localport=51230], receiveBufferSize=530904 [junit4] 2> 701931 INFO (SocketProxy-Acceptor-44247) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=39745,localport=54646], receiveBufferSize=530904 [junit4] 2> 701932 INFO (qtp1358857-5465) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false} [junit4] 2> 701932 INFO (qtp1358857-5465) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit. [junit4] 2> 701932 INFO (SocketProxy-Acceptor-36713) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=43609,localport=47942], receiveBufferSize=530904 [junit4] 2> 701932 INFO (qtp1358857-5465) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush [junit4] 2> 701932 INFO (qtp1358857-5465) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp= path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:44247/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 0 [junit4] 2> 701933 INFO (qtp15588423-5488) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false} [junit4] 2> 701933 INFO (qtp15588423-5488) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit. 
[junit4] 2> 701933 INFO (qtp15588423-5488) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush [junit4] 2> 701934 INFO (qtp15588423-5488) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp= path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:44247/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 1 [junit4] 2> 701934 INFO (qtp7786401-5523) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false} [junit4] 2> 701934 INFO (qtp7786401-5523) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit. 
[junit4] 2> 701934 INFO (qtp7786401-5523) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush [junit4] 2> 701934 INFO (qtp7786401-5523) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp= path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:44247/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 0 [junit4] 2> 701936 INFO (qtp15588423-5494) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp= path=/update params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=} 0 7 [junit4] 2> 701937 INFO (SocketProxy-Acceptor-44057) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=50796,localport=44057], receiveBufferSize:531000 [junit4] 2> 701937 INFO (SocketProxy-Acceptor-44057) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=37397,localport=51238], receiveBufferSize=530904 [junit4] 2> 701938 INFO (qtp1358857-5463) [n:127.0.0.1:44057_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request [collection1] webapp= path=/select params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2} hits=0 status=0 QTime=0 [junit4] 2> 701938 INFO (SocketProxy-Acceptor-36713) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=45902,localport=36713], receiveBufferSize:531000 [junit4] 2> 701939 INFO (SocketProxy-Acceptor-36713) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=43609,localport=47950], receiveBufferSize=530904 [junit4] 2> 701940 INFO (qtp7786401-5518) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.S.Request [collection1] webapp= path=/select 
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2} hits=0 status=0 QTime=0 [junit4] 2> 701940 INFO (SocketProxy-Acceptor-44247) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=59444,localport=44247], receiveBufferSize:531000 [junit4] 2> 701941 INFO (SocketProxy-Acceptor-44247) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=39745,localport=54660], receiveBufferSize=530904 [junit4] 2> 701942 INFO (qtp15588423-5493) [n:127.0.0.1:44247_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.S.Request [collection1] webapp= path=/select params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2} hits=0 status=0 QTime=0 [junit4] 2> 703942 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.AbstractFullDistribZkTestBase Creating collection with stateFormat=1: c8n_1x3_lf [junit4] 2> 703943 INFO (SocketProxy-Acceptor-44247) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=59500,localport=44247], receiveBufferSize:531000 [junit4] 2> 703943 INFO (SocketProxy-Acceptor-44247) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=39745,localport=54716], receiveBufferSize=530904 [junit4] 2> 703944 INFO (qtp15588423-5494) [n:127.0.0.1:44247_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params replicationFactor=3&maxShardsPerNode=1&name=c8n_1x3_lf&action=CREATE&numShards=1&stateFormat=1&wt=javabin&version=2 and sendToOCPQueue=true [junit4] 2> 703945 INFO (OverseerThreadFactory-2492-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ ] o.a.s.c.CreateCollectionCmd Create collection c8n_1x3_lf [junit4] 2> 703945 INFO (OverseerThreadFactory-2492-thread-1-processing-n:127.0.0.1:39243_) [n:127.0.0.1:39243_ ] o.a.s.c.CreateCollectionCmd Only one config set found in zk - using it:conf1 [junit4] 2> 704048 INFO (SocketProxy-Acceptor-44057) [ ] o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=50864,localport=44057], receiveBufferSize:531000 [junit4] 2> 704048 INFO (SocketProxy-Acceptor-44247) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=59508,localport=44247], receiveBufferSize:531000 [junit4] 2> 704048 INFO (SocketProxy-Acceptor-36713) [ ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=45966,localport=36713], receiveBufferSize:531000 [junit4] 2> 704049 INFO (SocketProxy-Acceptor-44057) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=37397,localport=51310], receiveBufferSize=530904 [junit4] 2> 704051 INFO (SocketProxy-Acceptor-36713) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=43609,localport=48022], receiveBufferSize=530904 [junit4] 2> 704051 INFO (SocketProxy-Acceptor-44247) [ ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=39745,localport=54726], receiveBufferSize=530904 [junit4] 2> 704051 INFO (qtp1358857-5465) [n:127.0.0.1:44057_ ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=c8n_1x3_lf_shard1_replica1&action=CREATE&numShards=1&collection=c8n_1x3_lf&shard=shard1&wt=javabin&version=2 [junit4] 2> 704052 INFO (qtp7786401-5523) [n:127.0.0.1:36713_ ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=c8n_1x3_lf_shard1_replica2&action=CREATE&numShards=1&collection=c8n_1x3_lf&shard=shard1&wt=javabin&version=2 [junit4] 2> 704052 INFO (qtp15588423-5493) [n:127.0.0.1:44247_ ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=c8n_1x3_lf_shard1_replica3&action=CREATE&numShards=1&collection=c8n_1x3_lf&shard=shard1&wt=javabin&version=2 [junit4] 2> 705062 WARN (qtp15588423-5493) [n:127.0.0.1:44247_ c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica3] o.a.s.c.Config Beginning with Solr 5.5, <mergePolicy> is deprecated, use <mergePolicyFactory> 
instead. [junit4] 2> 705063 INFO (qtp15588423-5493) [n:127.0.0.1:44247_ c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica3] o.a.s.c.SolrConfig Using Lucene MatchVersion: 6.6.2 [junit4] 2> 705062 WARN (qtp7786401-5523) [n:127.0.0.1:36713_ c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica2] o.a.s.c.Config Beginning with Solr 5.5, <mergePolicy> is deprecated, use <mergePolicyFactory> instead. [junit4] 2> 705064 INFO (qtp7786401-5523) [n:127.0.0.1:36713_ c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica2] o.a.s.c.SolrConfig Using Lucene MatchVersion: 6.6.2 [junit4] 2> 705062 WARN (qtp1358857-5465) [n:127.0.0.1:44057_ c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica1] o.a.s.c.Config Beginning with Solr 5.5, <mergePolicy> is deprecated, use <mergePolicyFactory> instead. [junit4] 2> 705069 INFO (qtp1358857-5465) [n:127.0.0.1:44057_ c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 6.6.2 [junit4] 2> 705073 INFO (qtp15588423-5493) [n:127.0.0.1:44247_ c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica3] o.a.s.s.IndexSchema [c8n_1x3_lf_shard1_replica3] Schema name=test [junit4] 2> 705075 INFO (qtp7786401-5523) [n:127.0.0.1:36713_ c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica2] o.a.s.s.IndexSchema [c8n_1x3_lf_shard1_replica2] Schema name=test [junit4] 2> 705080 INFO (qtp1358857-5465) [n:127.0.0.1:44057_ c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica1] o.a.s.s.IndexSchema [c8n_1x3_lf_shard1_replica1] Schema name=test [junit4] 2> 705161 INFO (qtp15588423-5493) [n:127.0.0.1:44247_ c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica3] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id [junit4] 2> 705166 INFO (qtp15588423-5493) [n:127.0.0.1:44247_ c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica3] o.a.s.c.CoreContainer Creating SolrCore 'c8n_1x3_lf_shard1_replica3' using configuration from collection c8n_1x3_lf, trusted=true [junit4] 2> 705166 INFO (qtp15588423-5493) [n:127.0.0.1:44247_ c:c8n_1x3_lf s:shard1 
x:c8n_1x3_lf_shard1_replica3] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 705166 INFO (qtp15588423-5493) [n:127.0.0.1:44247_ c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica3] o.a.s.c.SolrCore [[c8n_1x3_lf_shard1_replica3] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-2-001/cores/c8n_1x3_lf_shard1_replica3], dataDir=[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001/shard-2-001/cores/c8n_1x3_lf_shard1_replica3/data/] [junit4] 2> 70 [...truncated too long message...] lang.Thread.run(Thread.java:748) [junit4] 2> [junit4] 2> 821207 INFO (qtp7786401-5702) [n:127.0.0.1:36713_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={nodeName=127.0.0.1:44057_&onlyIfLeaderActive=true&core=c8n_1x3_lf_shard1_replica2&coreNodeName=core_node1&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2} status=400 QTime=6004 [junit4] 2> 821847 WARN (zkCallback-867-thread-3-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ ] o.a.s.c.c.ZkStateReader ZooKeeper watch triggered, but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for /live_nodes] [junit4] 2> 822815 WARN (zkCallback-867-thread-5-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.SyncStrategy Closed, skipping sync up. 
[junit4] 2> 822815 INFO (zkCallback-867-thread-5-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway [junit4] 2> 822815 INFO (zkCallback-867-thread-5-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.SolrCore [collection1] CLOSING SolrCore org.apache.solr.core.SolrCore@1c7f62a [junit4] 2> 822839 INFO (zkCallback-867-thread-5-processing-n:127.0.0.1:36713_) [n:127.0.0.1:36713_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.m.SolrMetricManager Closing metric reporters for: solr.core.collection1 [junit4] 2> 822844 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.e.j.s.AbstractConnector Stopped ServerConnector@1f8c40a{HTTP/1.1,[http/1.1]}{127.0.0.1:๐} [junit4] 2> 822845 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.e.j.s.h.ContextHandler Stopped o.e.j.s.ServletContextHandler@b66d73{/,null,UNAVAILABLE} [junit4] 2> 822846 ERROR (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes [junit4] 2> 822847 INFO (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:38605 38605 [junit4] 2> 827906 INFO (Thread-1424) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:38605 38605 [junit4] 2> 827906 WARN (Thread-1424) [ ] o.a.s.c.ZkTestServer Watch limit violations: [junit4] 2> Maximum concurrent create/delete watches above limit: [junit4] 2> [junit4] 2> 5 /solr/aliases.json [junit4] 2> 5 /solr/clusterprops.json [junit4] 2> 4 /solr/security.json [junit4] 2> 4 /solr/configs/conf1 [junit4] 2> 3 /solr/collections/c8n_1x3_lf/state.json 
[junit4] 2> 3 /solr/collections/collection1/state.json [junit4] 2> [junit4] 2> Maximum concurrent data watches above limit: [junit4] 2> [junit4] 2> 5 /solr/clusterstate.json [junit4] 2> 2 /solr/overseer_elect/election/98850653841588232-127.0.0.1:44057_-n_0000000001 [junit4] 2> [junit4] 2> Maximum concurrent children watches above limit: [junit4] 2> [junit4] 2> 5 /solr/live_nodes [junit4] 2> 5 /solr/collections [junit4] 2> 3 /solr/overseer/queue [junit4] 2> 3 /solr/overseer/collection-queue-work [junit4] 2> 3 /solr/overseer/queue-work [junit4] 2> [junit4] 2> 827907 WARN (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.SocketProxy Closing 1 connections to: http://127.0.0.1:44057/, target: http://127.0.0.1:37397/ [junit4] 2> 827907 WARN (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.SocketProxy Closing 16 connections to: http://127.0.0.1:36713/, target: http://127.0.0.1:43609/ [junit4] 2> 827907 WARN (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.SocketProxy Closing 0 connections to: http://127.0.0.1:44247/, target: http://127.0.0.1:39745/ [junit4] 2> 827907 WARN (TEST-LeaderFailoverAfterPartitionTest.test-seed#[52A3B40E92477BDA]) [ ] o.a.s.c.SocketProxy Closing 1 connections to: http://127.0.0.1:39243/, target: http://127.0.0.1:34785/ [junit4] 2> NOTE: reproduce with: ant test -Dtestcase=LeaderFailoverAfterPartitionTest -Dtests.method=test -Dtests.seed=52A3B40E92477BDA -Dtests.multiplier=3 -Dtests.slow=true -Dtests.locale=th-TH-u-nu-thai-x-lvariant-TH -Dtests.timezone=Africa/Dakar -Dtests.asserts=true -Dtests.file.encoding=UTF-8 [junit4] FAILURE 134s J1 | LeaderFailoverAfterPartitionTest.test <<< [junit4] > Throwable #1: java.lang.AssertionError: Expected 2 of 3 replicas to be active but only found 1; [core_node3:{"core":"c8n_1x3_lf_shard1_replica2","base_url":"http://127.0.0.1:36713","node_name":"127.0.0.1:36713_","state":"active","leader":"true"}]; clusterState: 
DocCollection(c8n_1x3_lf//clusterstate.json/27)={ [junit4] > "replicationFactor":"3", [junit4] > "shards":{"shard1":{ [junit4] > "range":"80000000-7fffffff", [junit4] > "state":"active", [junit4] > "replicas":{ [junit4] > "core_node1":{ [junit4] > "state":"down", [junit4] > "base_url":"http://127.0.0.1:44057", [junit4] > "core":"c8n_1x3_lf_shard1_replica1", [junit4] > "node_name":"127.0.0.1:44057_"}, [junit4] > "core_node2":{ [junit4] > "core":"c8n_1x3_lf_shard1_replica3", [junit4] > "base_url":"http://127.0.0.1:44247", [junit4] > "node_name":"127.0.0.1:44247_", [junit4] > "state":"down"}, [junit4] > "core_node3":{ [junit4] > "core":"c8n_1x3_lf_shard1_replica2", [junit4] > "base_url":"http://127.0.0.1:36713", [junit4] > "node_name":"127.0.0.1:36713_", [junit4] > "state":"active", [junit4] > "leader":"true"}}}}, [junit4] > "router":{"name":"compositeId"}, [junit4] > "maxShardsPerNode":"1", [junit4] > "autoAddReplicas":"false"} [junit4] > at __randomizedtesting.SeedInfo.seed([52A3B40E92477BDA:DAF78BD43CBB1622]:0) [junit4] > at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:168) [junit4] > at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:55) [junit4] > at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:992) [junit4] > at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:967) [junit4] > at java.lang.Thread.run(Thread.java:748) [junit4] 2> 827911 INFO (SUITE-LeaderFailoverAfterPartitionTest-seed#[52A3B40E92477BDA]-worker) [ ] o.a.s.SolrTestCaseJ4 ###deleteCore [junit4] 2> NOTE: leaving temporary files on disk at: /home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_52A3B40E92477BDA-001 [junit4] 2> NOTE: test params are: 
codec=HighCompressionCompressingStoredFields(storedFieldsFormat=CompressingStoredFieldsFormat(compressionMode=HIGH_COMPRESSION, chunkSize=4, maxDocsPerChunk=9, blockSize=8), termVectorsFormat=CompressingTermVectorsFormat(compressionMode=HIGH_COMPRESSION, chunkSize=4, blockSize=8)), sim=RandomSimilarity(queryNorm=true,coord=yes): {}, locale=th-TH-u-nu-thai-x-lvariant-TH, timezone=Africa/Dakar [junit4] 2> NOTE: Linux 4.10.0-33-generic i386/Oracle Corporation 1.8.0_144 (32-bit)/cpus=8,threads=1,free=176400952,total=536870912 [junit4] 2> NOTE: All tests run in this JVM: [TestStressVersions, TestDistributedStatsComponentCardinality, DistributedFacetPivotSmallTest, BasicFunctionalityTest, TestSQLHandlerNonCloud, TestCollectionAPIs, CollectionsAPIDistributedZkTest, SpatialFilterTest, RankQueryTest, TestWordDelimiterFilterFactory, ParsingFieldUpdateProcessorsTest, TestTolerantUpdateProcessorCloud, CSVRequestHandlerTest, ZkControllerTest, SchemaVersionSpecificBehaviorTest, TestLRUCache, TestUpdate, RemoteQueryErrorTest, TestClusterProperties, TestTrieFacet, ChaosMonkeySafeLeaderTest, BlockCacheTest, HdfsBasicDistributedZkTest, TestUnifiedSolrHighlighter, DistributedDebugComponentTest, TermsComponentTest, TestBM25SimilarityFactory, TestScoreJoinQPNoScore, SharedFSAutoReplicaFailoverUtilsTest, ShardRoutingTest, TestCharFilters, DeleteReplicaTest, TestConfigReload, ConnectionReuseTest, TestFileDictionaryLookup, SparseHLLTest, PreAnalyzedFieldManagedSchemaCloudTest, DistribCursorPagingTest, SolrCmdDistributorTest, DocValuesTest, JSONWriterTest, OpenCloseCoreStressTest, TestFuzzyAnalyzedSuggestions, SegmentsInfoRequestHandlerTest, ShardRoutingCustomTest, TestFieldTypeResource, TestSubQueryTransformerDistrib, DistributedFacetPivotLargeTest, WordBreakSolrSpellCheckerTest, TestCollapseQParserPlugin, RestartWhileUpdatingTest, TestOrdValues, TestSegmentSorting, URLClassifyProcessorTest, TestReloadDeadlock, TestLRUStatsCache, LoggingHandlerTest, TriLevelCompositeIdRoutingTest, 
TestQuerySenderListener, FullHLLTest, TestElisionMultitermQuery, SoftAutoCommitTest, TestRuleBasedAuthorizationPlugin, TestCollationFieldDocValues, TestEmbeddedSolrServerConstructors, TestDeleteCollectionOnDownNodes, MoveReplicaTest, TestGraphTermsQParserPlugin, TestFastOutputStream, TestJsonFacetRefinement, CollectionsAPISolrJTest, TestCSVResponseWriter, LeaderFailoverAfterPartitionTest] [junit4] Completed [253/713 (1!)] on J1 in 134.03s, 1 test, 1 failure <<< FAILURES! [...truncated 40995 lines...]
--------------------------------------------------------------------- To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org