Build: https://jenkins.thetaphi.de/job/Lucene-Solr-6.6-Linux/13/
Java: 64bit/jdk1.8.0_131 -XX:+UseCompressedOops -XX:+UseSerialGC

1 tests failed.
FAILED:  org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test

Error Message:
Expected 2 of 3 replicas to be active but only found 1; 
[core_node3:{"core":"c8n_1x3_lf_shard1_replica1","base_url":"http://127.0.0.1:38525/u","node_name":"127.0.0.1:38525_u","state":"active","leader":"true"}];
 clusterState: 
DocCollection(c8n_1x3_lf//collections/c8n_1x3_lf/state.json/17)={   
"replicationFactor":"3",   "shards":{"shard1":{       
"range":"80000000-7fffffff",       "state":"active",       "replicas":{         
"core_node1":{           "state":"down",           
"base_url":"http://127.0.0.1:34027/u",           
"core":"c8n_1x3_lf_shard1_replica3",           
"node_name":"127.0.0.1:34027_u"},         "core_node2":{           
"core":"c8n_1x3_lf_shard1_replica2",           
"base_url":"http://127.0.0.1:34847/u",           
"node_name":"127.0.0.1:34847_u",           "state":"down"},         
"core_node3":{           "core":"c8n_1x3_lf_shard1_replica1",           
"base_url":"http://127.0.0.1:38525/u",           
"node_name":"127.0.0.1:38525_u",           "state":"active",           
"leader":"true"}}}},   "router":{"name":"compositeId"},   
"maxShardsPerNode":"1",   "autoAddReplicas":"false"}

Stack Trace:
java.lang.AssertionError: Expected 2 of 3 replicas to be active but only found 
1; 
[core_node3:{"core":"c8n_1x3_lf_shard1_replica1","base_url":"http://127.0.0.1:38525/u","node_name":"127.0.0.1:38525_u","state":"active","leader":"true"}];
 clusterState: DocCollection(c8n_1x3_lf//collections/c8n_1x3_lf/state.json/17)={
  "replicationFactor":"3",
  "shards":{"shard1":{
      "range":"80000000-7fffffff",
      "state":"active",
      "replicas":{
        "core_node1":{
          "state":"down",
          "base_url":"http://127.0.0.1:34027/u",
          "core":"c8n_1x3_lf_shard1_replica3",
          "node_name":"127.0.0.1:34027_u"},
        "core_node2":{
          "core":"c8n_1x3_lf_shard1_replica2",
          "base_url":"http://127.0.0.1:34847/u",
          "node_name":"127.0.0.1:34847_u",
          "state":"down"},
        "core_node3":{
          "core":"c8n_1x3_lf_shard1_replica1",
          "base_url":"http://127.0.0.1:38525/u",
          "node_name":"127.0.0.1:38525_u",
          "state":"active",
          "leader":"true"}}}},
  "router":{"name":"compositeId"},
  "maxShardsPerNode":"1",
  "autoAddReplicas":"false"}
        at 
__randomizedtesting.SeedInfo.seed([30F4333592284485:B8A00CEF3CD4297D]:0)
        at org.junit.Assert.fail(Assert.java:93)
        at org.junit.Assert.assertTrue(Assert.java:43)
        at 
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:168)
        at 
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:55)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957)
        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:992)
        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:967)
        at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at java.lang.Thread.run(Thread.java:748)




Build Log:
[...truncated 12630 lines...]
   [junit4] Suite: org.apache.solr.cloud.LeaderFailoverAfterPartitionTest
   [junit4]   2> Creating dataDir: 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/init-core-data-001
   [junit4]   2> 1191398 WARN  
(SUITE-LeaderFailoverAfterPartitionTest-seed#[30F4333592284485]-worker) [    ] 
o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=68 numCloses=68
   [junit4]   2> 1191398 INFO  
(SUITE-LeaderFailoverAfterPartitionTest-seed#[30F4333592284485]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Using PointFields
   [junit4]   2> 1191399 INFO  
(SUITE-LeaderFailoverAfterPartitionTest-seed#[30F4333592284485]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: 
@org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776)
   [junit4]   2> 1191399 INFO  
(SUITE-LeaderFailoverAfterPartitionTest-seed#[30F4333592284485]-worker) [    ] 
o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /u/
   [junit4]   2> 1191400 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 1191400 INFO  (Thread-2350) [    ] o.a.s.c.ZkTestServer client 
port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 1191400 INFO  (Thread-2350) [    ] o.a.s.c.ZkTestServer 
Starting server
   [junit4]   2> 1191401 ERROR (Thread-2350) [    ] o.a.z.s.ZooKeeperServer 
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action 
on ERROR or SHUTDOWN server state changes
   [junit4]   2> 1191500 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.ZkTestServer start zk server on port:33611
   [junit4]   2> 1191504 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
 to /configs/conf1/solrconfig.xml
   [junit4]   2> 1191504 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/schema.xml
 to /configs/conf1/schema.xml
   [junit4]   2> 1191505 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
 to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 1191505 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/stopwords.txt
 to /configs/conf1/stopwords.txt
   [junit4]   2> 1191506 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/protwords.txt
 to /configs/conf1/protwords.txt
   [junit4]   2> 1191506 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/currency.xml
 to /configs/conf1/currency.xml
   [junit4]   2> 1191507 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml
 to /configs/conf1/enumsConfig.xml
   [junit4]   2> 1191507 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json
 to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 1191507 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt
 to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 1191508 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt
 to /configs/conf1/old_synonyms.txt
   [junit4]   2> 1191508 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
 to /configs/conf1/synonyms.txt
   [junit4]   2> 1191564 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/control-001/cores/collection1
   [junit4]   2> 1191565 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 1191566 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@4f1145f{/u,null,AVAILABLE}
   [junit4]   2> 1191568 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@6976bd0e{HTTP/1.1,[http/1.1]}{127.0.0.1:35005}
   [junit4]   2> 1191568 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.Server Started @1193228ms
   [junit4]   2> 1191568 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/tempDir-001/control/data,
 hostContext=/u, hostPort=38525, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/control-001/cores}
   [junit4]   2> 1191568 ERROR 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 1191568 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
6.6.0
   [junit4]   2> 1191568 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 1191568 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 1191568 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-05-28T10:08:38.544Z
   [junit4]   2> 1191571 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 1191571 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/control-001/solr.xml
   [junit4]   2> 1191575 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 1191576 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:33611/solr
   [junit4]   2> 1191582 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:38525_u    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 1191582 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:38525_u    ] o.a.s.c.OverseerElectionContext I am going to be the 
leader 127.0.0.1:38525_u
   [junit4]   2> 1191583 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:38525_u    ] o.a.s.c.Overseer Overseer 
(id=98039635533955076-127.0.0.1:38525_u-n_0000000000) starting
   [junit4]   2> 1191592 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:38525_u    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:38525_u
   [junit4]   2> 1191592 INFO  
(zkCallback-1811-thread-1-processing-n:127.0.0.1:38525_u) [n:127.0.0.1:38525_u  
  ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 1191627 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:38525_u    ] o.a.s.c.CorePropertiesLocator Found 1 core 
definitions underneath 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/control-001/cores
   [junit4]   2> 1191627 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:38525_u    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 1191628 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 1191629 INFO  
(OverseerStateUpdate-98039635533955076-127.0.0.1:38525_u-n_0000000000) 
[n:127.0.0.1:38525_u    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard1
   [junit4]   2> 1192637 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 6.6.0
   [junit4]   2> 1192645 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=test
   [junit4]   2> 1192721 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] o.a.s.s.IndexSchema 
Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 1192726 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from 
collection control_collection, trusted=true
   [junit4]   2> 1192726 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] o.a.s.c.SolrCore 
solr.RecoveryStrategy.Builder
   [junit4]   2> 1192726 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] o.a.s.c.SolrCore 
[[collection1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/control-001/cores/collection1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/control-001/cores/collection1/data/]
   [junit4]   2> 1192727 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX 
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@604fde43
   [junit4]   2> 1192727 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.AlcoholicMergePolicy: [AlcoholicMergePolicy: 
minMergeSize=0, mergeFactor=10, maxMergeSize=1208930366, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1]
   [junit4]   2> 1192730 WARN  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 1192753 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 1192753 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1192753 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 1192753 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 1192754 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: 
minMergeSize=1677721, mergeFactor=25, maxMergeSize=2147483648, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1705748049811816]
   [junit4]   2> 1192754 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.s.SolrIndexSearcher Opening [Searcher@2d88b638[collection1] main]
   [junit4]   2> 1192754 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 1192754 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1192755 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 1192755 INFO  
(searcherExecutor-5182-thread-1-processing-n:127.0.0.1:38525_u x:collection1 
c:control_collection) [n:127.0.0.1:38525_u c:control_collection   
x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher 
Searcher@2d88b638[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1192756 INFO  
(coreLoadExecutor-5181-thread-1-processing-n:127.0.0.1:38525_u) 
[n:127.0.0.1:38525_u c:control_collection   x:collection1] o.a.s.u.UpdateLog 
Could not find max version in index or recent updates, using new clock 
1568634169964101632
   [junit4]   2> 1192758 INFO  
(coreZkRegister-5174-thread-1-processing-n:127.0.0.1:38525_u x:collection1 
c:control_collection) [n:127.0.0.1:38525_u c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas 
found to continue.
   [junit4]   2> 1192758 INFO  
(coreZkRegister-5174-thread-1-processing-n:127.0.0.1:38525_u x:collection1 
c:control_collection) [n:127.0.0.1:38525_u c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new 
leader - try and sync
   [junit4]   2> 1192758 INFO  
(coreZkRegister-5174-thread-1-processing-n:127.0.0.1:38525_u x:collection1 
c:control_collection) [n:127.0.0.1:38525_u c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to 
http://127.0.0.1:38525/u/collection1/
   [junit4]   2> 1192758 INFO  
(coreZkRegister-5174-thread-1-processing-n:127.0.0.1:38525_u x:collection1 
c:control_collection) [n:127.0.0.1:38525_u c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync 
replicas to me
   [junit4]   2> 1192758 INFO  
(coreZkRegister-5174-thread-1-processing-n:127.0.0.1:38525_u x:collection1 
c:control_collection) [n:127.0.0.1:38525_u c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.SyncStrategy 
http://127.0.0.1:38525/u/collection1/ has no replicas
   [junit4]   2> 1192759 INFO  
(coreZkRegister-5174-thread-1-processing-n:127.0.0.1:38525_u x:collection1 
c:control_collection) [n:127.0.0.1:38525_u c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Found all 
replicas participating in election, clear LIR
   [junit4]   2> 1192760 INFO  
(coreZkRegister-5174-thread-1-processing-n:127.0.0.1:38525_u x:collection1 
c:control_collection) [n:127.0.0.1:38525_u c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new 
leader: http://127.0.0.1:38525/u/collection1/ shard1
   [junit4]   2> 1192910 INFO  
(coreZkRegister-5174-thread-1-processing-n:127.0.0.1:38525_u x:collection1 
c:control_collection) [n:127.0.0.1:38525_u c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no recovery 
necessary
   [junit4]   2> 1193134 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 1193134 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:33611/solr ready
   [junit4]   2> 1193134 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection 
loss:false
   [junit4]   2> 1193134 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase Creating collection1 with stateFormat=2
   [junit4]   2> 1193181 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-1-001/cores/collection1
   [junit4]   2> 1193181 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-1-001
   [junit4]   2> 1193182 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 1193182 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@3bfa671d{/u,null,AVAILABLE}
   [junit4]   2> 1193182 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@3bd1c39f{HTTP/1.1,[http/1.1]}{127.0.0.1:42197}
   [junit4]   2> 1193183 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.Server Started @1194843ms
   [junit4]   2> 1193183 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/tempDir-001/jetty1,
 solrconfig=solrconfig.xml, hostContext=/u, hostPort=34027, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-1-001/cores}
   [junit4]   2> 1193183 ERROR 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 1193183 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
6.6.0
   [junit4]   2> 1193183 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 1193183 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 1193183 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-05-28T10:08:40.159Z
   [junit4]   2> 1193184 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 1193184 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-1-001/solr.xml
   [junit4]   2> 1193188 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 1193188 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:33611/solr
   [junit4]   2> 1193191 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:34027_u    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (1)
   [junit4]   2> 1193192 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:34027_u    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 1193192 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:34027_u    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:34027_u
   [junit4]   2> 1193193 INFO  (zkCallback-1815-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1193193 INFO  
(zkCallback-1811-thread-1-processing-n:127.0.0.1:38525_u) [n:127.0.0.1:38525_u  
  ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1193193 INFO  
(zkCallback-1821-thread-1-processing-n:127.0.0.1:34027_u) [n:127.0.0.1:34027_u  
  ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1193250 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:34027_u    ] o.a.s.c.CorePropertiesLocator Found 1 core 
definitions underneath 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-1-001/cores
   [junit4]   2> 1193250 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:34027_u    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 1193251 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 1193251 INFO  
(OverseerStateUpdate-98039635533955076-127.0.0.1:38525_u-n_0000000000) 
[n:127.0.0.1:38525_u    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard2
   [junit4]   2> 1193352 INFO  
(zkCallback-1821-thread-1-processing-n:127.0.0.1:34027_u) [n:127.0.0.1:34027_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeCreated path:/collections/collection1/state.json] 
for collection [collection1] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1194269 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.c.SolrConfig Using 
Lucene MatchVersion: 6.6.0
   [junit4]   2> 1194277 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=test
   [junit4]   2> 1194349 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded 
schema test/1.0 with uniqueid field id
   [junit4]   2> 1194354 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1' using configuration from collection 
collection1, trusted=true
   [junit4]   2> 1194354 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.c.SolrCore 
solr.RecoveryStrategy.Builder
   [junit4]   2> 1194354 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.c.SolrCore 
[[collection1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-1-001/cores/collection1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-1-001/cores/collection1/data/]
   [junit4]   2> 1194354 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.c.JmxMonitoredMap JMX 
monitoring is enabled. Adding Solr mbeans to JMX Server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@604fde43
   [junit4]   2> 1194355 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.u.RandomMergePolicy 
RandomMergePolicy wrapping class org.apache.lucene.index.AlcoholicMergePolicy: 
[AlcoholicMergePolicy: minMergeSize=0, mergeFactor=10, maxMergeSize=1208930366, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1]
   [junit4]   2> 1194358 WARN  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.c.RequestHandlers 
INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class 
= DumpRequestHandler,attributes = {initParams=a, name=/dump, 
class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 1194382 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.u.UpdateHandler Using 
UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1194382 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1194383 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.u.CommitTracker Hard 
AutoCommit: disabled
   [junit4]   2> 1194383 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.u.CommitTracker Soft 
AutoCommit: disabled
   [junit4]   2> 1194383 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.u.RandomMergePolicy 
RandomMergePolicy wrapping class 
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: 
minMergeSize=1677721, mergeFactor=25, maxMergeSize=2147483648, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1705748049811816]
   [junit4]   2> 1194383 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.s.SolrIndexSearcher 
Opening [Searcher@5e4b16fe[collection1] main]
   [junit4]   2> 1194384 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 1194384 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1194384 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.h.ReplicationHandler 
Commits will be reserved for  10000
   [junit4]   2> 1194385 INFO  
(searcherExecutor-5193-thread-1-processing-n:127.0.0.1:34027_u x:collection1 
c:collection1) [n:127.0.0.1:34027_u c:collection1   x:collection1] 
o.a.s.c.SolrCore [collection1] Registered new searcher 
Searcher@5e4b16fe[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1194385 INFO  
(coreLoadExecutor-5192-thread-1-processing-n:127.0.0.1:34027_u) 
[n:127.0.0.1:34027_u c:collection1   x:collection1] o.a.s.u.UpdateLog Could not 
find max version in index or recent updates, using new clock 1568634171672231936
   [junit4]   2> 1194388 INFO  
(coreZkRegister-5187-thread-1-processing-n:127.0.0.1:34027_u x:collection1 
c:collection1) [n:127.0.0.1:34027_u c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to 
continue.
   [junit4]   2> 1194388 INFO  
(coreZkRegister-5187-thread-1-processing-n:127.0.0.1:34027_u x:collection1 
c:collection1) [n:127.0.0.1:34027_u c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try 
and sync
   [junit4]   2> 1194388 INFO  
(coreZkRegister-5187-thread-1-processing-n:127.0.0.1:34027_u x:collection1 
c:collection1) [n:127.0.0.1:34027_u c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.SyncStrategy Sync replicas to 
http://127.0.0.1:34027/u/collection1/
   [junit4]   2> 1194388 INFO  
(coreZkRegister-5187-thread-1-processing-n:127.0.0.1:34027_u x:collection1 
c:collection1) [n:127.0.0.1:34027_u c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 1194388 INFO  
(coreZkRegister-5187-thread-1-processing-n:127.0.0.1:34027_u x:collection1 
c:collection1) [n:127.0.0.1:34027_u c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:34027/u/collection1/ has 
no replicas
   [junit4]   2> 1194388 INFO  
(coreZkRegister-5187-thread-1-processing-n:127.0.0.1:34027_u x:collection1 
c:collection1) [n:127.0.0.1:34027_u c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.ShardLeaderElectionContext Found all replicas 
participating in election, clear LIR
   [junit4]   2> 1194389 INFO  
(coreZkRegister-5187-thread-1-processing-n:127.0.0.1:34027_u x:collection1 
c:collection1) [n:127.0.0.1:34027_u c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: 
http://127.0.0.1:34027/u/collection1/ shard2
   [junit4]   2> 1194491 INFO  
(zkCallback-1821-thread-1-processing-n:127.0.0.1:34027_u) [n:127.0.0.1:34027_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [2])
   [junit4]   2> 1194540 INFO  
(coreZkRegister-5187-thread-1-processing-n:127.0.0.1:34027_u x:collection1 
c:collection1) [n:127.0.0.1:34027_u c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 1194641 INFO  
(zkCallback-1821-thread-1-processing-n:127.0.0.1:34027_u) [n:127.0.0.1:34027_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [2])
   [junit4]   2> 1194799 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-2-001/cores/collection1
   [junit4]   2> 1194799 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-2-001
   [junit4]   2> 1194800 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 1194801 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@4bc7aad4{/u,null,AVAILABLE}
   [junit4]   2> 1194801 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@62060609{HTTP/1.1,[http/1.1]}{127.0.0.1:33699}
   [junit4]   2> 1194801 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.Server Started @1196462ms
   [junit4]   2> 1194801 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/tempDir-001/jetty2,
 solrconfig=solrconfig.xml, hostContext=/u, hostPort=34847, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-2-001/cores}
   [junit4]   2> 1194802 ERROR 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 1194802 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr? version 
6.6.0
   [junit4]   2> 1194802 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 1194802 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 1194802 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-05-28T10:08:41.778Z
   [junit4]   2> 1194804 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 1194804 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-2-001/solr.xml
   [junit4]   2> 1194807 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 1194807 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:33611/solr
   [junit4]   2> 1194810 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:34847_u    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (2)
   [junit4]   2> 1194811 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:34847_u    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 1194812 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:34847_u    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:34847_u
   [junit4]   2> 1194812 INFO  
(zkCallback-1827-thread-1-processing-n:127.0.0.1:34847_u) [n:127.0.0.1:34847_u  
  ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 1194812 INFO  
(zkCallback-1811-thread-2-processing-n:127.0.0.1:38525_u) [n:127.0.0.1:38525_u  
  ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 1194812 INFO  
(zkCallback-1821-thread-1-processing-n:127.0.0.1:34027_u) [n:127.0.0.1:34027_u  
  ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 1194812 INFO  (zkCallback-1815-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 1194859 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:34847_u    ] o.a.s.c.CorePropertiesLocator Found 1 core 
definitions underneath 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-2-001/cores
   [junit4]   2> 1194859 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:34847_u    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 1194860 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 1194860 INFO  
(OverseerStateUpdate-98039635533955076-127.0.0.1:38525_u-n_0000000000) 
[n:127.0.0.1:38525_u    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard1
   [junit4]   2> 1194961 INFO  
(zkCallback-1827-thread-1-processing-n:127.0.0.1:34847_u) [n:127.0.0.1:34847_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 1194961 INFO  
(zkCallback-1821-thread-1-processing-n:127.0.0.1:34027_u) [n:127.0.0.1:34027_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 1195867 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.c.SolrConfig Using 
Lucene MatchVersion: 6.6.0
   [junit4]   2> 1195876 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=test
   [junit4]   2> 1195951 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded 
schema test/1.0 with uniqueid field id
   [junit4]   2> 1195956 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1' using configuration from collection 
collection1, trusted=true
   [junit4]   2> 1195956 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.c.SolrCore 
solr.RecoveryStrategy.Builder
   [junit4]   2> 1195956 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.c.SolrCore 
[[collection1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-2-001/cores/collection1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-2-001/cores/collection1/data/]
   [junit4]   2> 1195957 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.c.JmxMonitoredMap JMX 
monitoring is enabled. Adding Solr mbeans to JMX Server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@604fde43
   [junit4]   2> 1195957 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.u.RandomMergePolicy 
RandomMergePolicy wrapping class org.apache.lucene.index.AlcoholicMergePolicy: 
[AlcoholicMergePolicy: minMergeSize=0, mergeFactor=10, maxMergeSize=1208930366, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1]
   [junit4]   2> 1195961 WARN  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.c.RequestHandlers 
INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class 
= DumpRequestHandler,attributes = {initParams=a, name=/dump, 
class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 1196000 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.u.UpdateHandler Using 
UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1196000 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1196001 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.u.CommitTracker Hard 
AutoCommit: disabled
   [junit4]   2> 1196001 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.u.CommitTracker Soft 
AutoCommit: disabled
   [junit4]   2> 1196001 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.u.RandomMergePolicy 
RandomMergePolicy wrapping class 
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: 
minMergeSize=1677721, mergeFactor=25, maxMergeSize=2147483648, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1705748049811816]
   [junit4]   2> 1196001 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.s.SolrIndexSearcher 
Opening [Searcher@2b4ef1dc[collection1] main]
   [junit4]   2> 1196002 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 1196002 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1196002 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.h.ReplicationHandler 
Commits will be reserved for  10000
   [junit4]   2> 1196003 INFO  
(searcherExecutor-5204-thread-1-processing-n:127.0.0.1:34847_u x:collection1 
c:collection1) [n:127.0.0.1:34847_u c:collection1   x:collection1] 
o.a.s.c.SolrCore [collection1] Registered new searcher 
Searcher@2b4ef1dc[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1196003 INFO  
(coreLoadExecutor-5203-thread-1-processing-n:127.0.0.1:34847_u) 
[n:127.0.0.1:34847_u c:collection1   x:collection1] o.a.s.u.UpdateLog Could not 
find max version in index or recent updates, using new clock 1568634173368827904
   [junit4]   2> 1196006 INFO  
(coreZkRegister-5198-thread-1-processing-n:127.0.0.1:34847_u x:collection1 
c:collection1) [n:127.0.0.1:34847_u c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to 
continue.
   [junit4]   2> 1196006 INFO  
(coreZkRegister-5198-thread-1-processing-n:127.0.0.1:34847_u x:collection1 
c:collection1) [n:127.0.0.1:34847_u c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try 
and sync
   [junit4]   2> 1196006 INFO  
(coreZkRegister-5198-thread-1-processing-n:127.0.0.1:34847_u x:collection1 
c:collection1) [n:127.0.0.1:34847_u c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.SyncStrategy Sync replicas to 
http://127.0.0.1:34847/u/collection1/
   [junit4]   2> 1196006 INFO  
(coreZkRegister-5198-thread-1-processing-n:127.0.0.1:34847_u x:collection1 
c:collection1) [n:127.0.0.1:34847_u c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 1196006 INFO  
(coreZkRegister-5198-thread-1-processing-n:127.0.0.1:34847_u x:collection1 
c:collection1) [n:127.0.0.1:34847_u c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:34847/u/collection1/ has 
no replicas
   [junit4]   2> 1196006 INFO  
(coreZkRegister-5198-thread-1-processing-n:127.0.0.1:34847_u x:collection1 
c:collection1) [n:127.0.0.1:34847_u c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.ShardLeaderElectionContext Found all replicas 
participating in election, clear LIR
   [junit4]   2> 1196007 INFO  
(coreZkRegister-5198-thread-1-processing-n:127.0.0.1:34847_u x:collection1 
c:collection1) [n:127.0.0.1:34847_u c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: 
http://127.0.0.1:34847/u/collection1/ shard1
   [junit4]   2> 1196108 INFO  
(zkCallback-1821-thread-1-processing-n:127.0.0.1:34027_u) [n:127.0.0.1:34027_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 1196108 INFO  
(zkCallback-1827-thread-1-processing-n:127.0.0.1:34847_u) [n:127.0.0.1:34847_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 1196157 INFO  
(coreZkRegister-5198-thread-1-processing-n:127.0.0.1:34847_u x:collection1 
c:collection1) [n:127.0.0.1:34847_u c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 1196259 INFO  
(zkCallback-1827-thread-1-processing-n:127.0.0.1:34847_u) [n:127.0.0.1:34847_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 1196259 INFO  
(zkCallback-1821-thread-1-processing-n:127.0.0.1:34027_u) [n:127.0.0.1:34027_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 1196407 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-3-001/cores/collection1
   [junit4]   2> 1196407 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-3-001
   [junit4]   2> 1196407 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 1196408 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@1888f4e2{/u,null,AVAILABLE}
   [junit4]   2> 1196408 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@5bc699d7{HTTP/1.1,[http/1.1]}{127.0.0.1:33323}
   [junit4]   2> 1196409 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.Server Started @1198069ms
   [junit4]   2> 1196409 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/tempDir-001/jetty3,
 solrconfig=solrconfig.xml, hostContext=/u, hostPort=33471, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-3-001/cores}
   [junit4]   2> 1196409 ERROR 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 1196409 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr? version 
6.6.0
   [junit4]   2> 1196409 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 1196409 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 1196409 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-05-28T10:08:43.385Z
   [junit4]   2> 1196411 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 1196411 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-3-001/solr.xml
   [junit4]   2> 1196415 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 1196416 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:33611/solr
   [junit4]   2> 1196420 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:33471_u    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (3)
   [junit4]   2> 1196420 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:33471_u    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 1196421 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:33471_u    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:33471_u
   [junit4]   2> 1196422 INFO  
(zkCallback-1811-thread-2-processing-n:127.0.0.1:38525_u) [n:127.0.0.1:38525_u  
  ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 1196422 INFO  
(zkCallback-1827-thread-1-processing-n:127.0.0.1:34847_u) [n:127.0.0.1:34847_u  
  ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 1196422 INFO  (zkCallback-1815-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 1196422 INFO  
(zkCallback-1833-thread-1-processing-n:127.0.0.1:33471_u) [n:127.0.0.1:33471_u  
  ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 1196422 INFO  
(zkCallback-1821-thread-1-processing-n:127.0.0.1:34027_u) [n:127.0.0.1:34027_u  
  ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 1196447 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:33471_u    ] o.a.s.c.CorePropertiesLocator Found 1 core 
definitions underneath 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-3-001/cores
   [junit4]   2> 1196447 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) 
[n:127.0.0.1:33471_u    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 1196448 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 1196449 INFO  
(OverseerStateUpdate-98039635533955076-127.0.0.1:38525_u-n_0000000000) 
[n:127.0.0.1:38525_u    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard2
   [junit4]   2> 1196550 INFO  
(zkCallback-1821-thread-1-processing-n:127.0.0.1:34027_u) [n:127.0.0.1:34027_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 1196550 INFO  
(zkCallback-1827-thread-1-processing-n:127.0.0.1:34847_u) [n:127.0.0.1:34847_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 1196550 INFO  
(zkCallback-1833-thread-1-processing-n:127.0.0.1:33471_u) [n:127.0.0.1:33471_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 1197454 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.c.SolrConfig Using 
Lucene MatchVersion: 6.6.0
   [junit4]   2> 1197461 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=test
   [junit4]   2> 1197532 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded 
schema test/1.0 with uniqueid field id
   [junit4]   2> 1197536 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1' using configuration from collection 
collection1, trusted=true
   [junit4]   2> 1197536 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.c.SolrCore 
solr.RecoveryStrategy.Builder
   [junit4]   2> 1197536 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.c.SolrCore 
[[collection1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-3-001/cores/collection1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001/shard-3-001/cores/collection1/data/]
   [junit4]   2> 1197536 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.c.JmxMonitoredMap JMX 
monitoring is enabled. Adding Solr mbeans to JMX Server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@604fde43
   [junit4]   2> 1197537 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.u.RandomMergePolicy 
RandomMergePolicy wrapping class org.apache.lucene.index.AlcoholicMergePolicy: 
[AlcoholicMergePolicy: minMergeSize=0, mergeFactor=10, maxMergeSize=1208930366, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1]
   [junit4]   2> 1197539 WARN  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.c.RequestHandlers 
INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class 
= DumpRequestHandler,attributes = {initParams=a, name=/dump, 
class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 1197560 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.u.UpdateHandler Using 
UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1197560 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1197560 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.u.CommitTracker Hard 
AutoCommit: disabled
   [junit4]   2> 1197560 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.u.CommitTracker Soft 
AutoCommit: disabled
   [junit4]   2> 1197561 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.u.RandomMergePolicy 
RandomMergePolicy wrapping class 
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: 
minMergeSize=1677721, mergeFactor=25, maxMergeSize=2147483648, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1705748049811816]
   [junit4]   2> 1197561 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.s.SolrIndexSearcher 
Opening [Searcher@3d517c7d[collection1] main]
   [junit4]   2> 1197561 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 1197561 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1197561 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.h.ReplicationHandler 
Commits will be reserved for  10000
   [junit4]   2> 1197562 INFO  
(searcherExecutor-5215-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
c:collection1) [n:127.0.0.1:33471_u c:collection1   x:collection1] 
o.a.s.c.SolrCore [collection1] Registered new searcher 
Searcher@3d517c7d[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1197563 INFO  
(coreLoadExecutor-5214-thread-1-processing-n:127.0.0.1:33471_u) 
[n:127.0.0.1:33471_u c:collection1   x:collection1] o.a.s.u.UpdateLog Could not 
find max version in index or recent updates, using new clock 1568634175004606464
   [junit4]   2> 1197564 INFO  
(coreZkRegister-5209-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
c:collection1) [n:127.0.0.1:33471_u c:collection1 s:shard2 r:core_node3 
x:collection1] o.a.s.c.ZkController Core needs to recover:collection1
   [junit4]   2> 1197565 INFO  
(updateExecutor-1830-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.u.DefaultSolrCoreState Running 
recovery
   [junit4]   2> 1197565 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Starting recovery 
process. recoveringAfterStartup=true
   [junit4]   2> 1197565 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy ###### 
startupVersions=[[]]
   [junit4]   2> 1197565 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Begin buffering 
updates. core=[collection1]
   [junit4]   2> 1197565 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.u.UpdateLog Starting to buffer 
updates. FSUpdateLog{state=ACTIVE, tlog=null}
   [junit4]   2> 1197565 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Publishing state 
of core [collection1] as recovering, leader is 
[http://127.0.0.1:34027/u/collection1/] and I am 
[http://127.0.0.1:33471/u/collection1/]
   [junit4]   2> 1197566 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Sending prep 
recovery command to [http://127.0.0.1:34027/u]; [WaitForState: 
action=PREPRECOVERY&core=collection1&nodeName=127.0.0.1:33471_u&coreNodeName=core_node3&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
   [junit4]   2> 1197567 INFO  (SocketProxy-Acceptor-34027) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=50848,localport=34027], receiveBufferSize:531000
   [junit4]   2> 1197567 INFO  (SocketProxy-Acceptor-34027) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=42197,localport=40304], receiveBufferSize=530904
   [junit4]   2> 1197567 INFO  (qtp704935349-10961) [n:127.0.0.1:34027_u    ] 
o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node3, state: 
recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true
   [junit4]   2> 1197568 INFO  (qtp704935349-10961) [n:127.0.0.1:34027_u    ] 
o.a.s.h.a.PrepRecoveryOp Will wait a max of 183 seconds to see collection1 
(shard2 of collection1) have state: recovering
   [junit4]   2> 1197568 INFO  (qtp704935349-10961) [n:127.0.0.1:34027_u    ] 
o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, 
shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? 
true, live=true, checkLive=true, currentState=down, localState=active, 
nodeName=127.0.0.1:33471_u, coreNodeName=core_node3, 
onlyIfActiveCheckResult=false, nodeProps: 
core_node3:{"core":"collection1","base_url":"http://127.0.0.1:33471/u","node_name":"127.0.0.1:33471_u","state":"down"}
   [junit4]   2> 1197667 INFO  
(zkCallback-1827-thread-1-processing-n:127.0.0.1:34847_u) [n:127.0.0.1:34847_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 1197667 INFO  
(zkCallback-1833-thread-1-processing-n:127.0.0.1:33471_u) [n:127.0.0.1:33471_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 1197667 INFO  
(zkCallback-1821-thread-1-processing-n:127.0.0.1:34027_u) [n:127.0.0.1:34027_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 1197953 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 1197953 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase Wait for recoveries to finish - wait 
30000 for each attempt
   [junit4]   2> 1197953 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: 
collection1 failOnTimeout:true timeout (sec):30000
   [junit4]   2> 1198568 INFO  (qtp704935349-10961) [n:127.0.0.1:34027_u    ] 
o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, 
shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? 
true, live=true, checkLive=true, currentState=recovering, localState=active, 
nodeName=127.0.0.1:33471_u, coreNodeName=core_node3, 
onlyIfActiveCheckResult=false, nodeProps: 
core_node3:{"core":"collection1","base_url":"http://127.0.0.1:33471/u","node_name":"127.0.0.1:33471_u","state":"recovering"}
   [junit4]   2> 1198568 INFO  (qtp704935349-10961) [n:127.0.0.1:34027_u    ] 
o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node3, state: recovering, 
checkLive: true, onlyIfLeader: true for: 1 seconds.
   [junit4]   2> 1198568 INFO  (qtp704935349-10961) [n:127.0.0.1:34027_u    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores 
params={nodeName=127.0.0.1:33471_u&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node3&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
 status=0 QTime=1000
   [junit4]   2> 1199068 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Attempting to 
PeerSync from [http://127.0.0.1:34027/u/collection1/] - 
recoveringAfterStartup=[true]
   [junit4]   2> 1199069 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.u.PeerSync PeerSync: 
core=collection1 url=http://127.0.0.1:33471/u START 
replicas=[http://127.0.0.1:34027/u/collection1/] nUpdates=100
   [junit4]   2> 1199070 INFO  (SocketProxy-Acceptor-34027) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=50852,localport=34027], receiveBufferSize:531000
   [junit4]   2> 1199070 INFO  (SocketProxy-Acceptor-34027) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=42197,localport=40308], receiveBufferSize=530904
   [junit4]   2> 1199073 INFO  (qtp704935349-10961) [n:127.0.0.1:34027_u 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.IndexFingerprint 
IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, 
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, 
maxDoc=0}
   [junit4]   2> 1199073 INFO  (qtp704935349-10961) [n:127.0.0.1:34027_u 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp=/u path=/get 
params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2}
 status=0 QTime=0
   [junit4]   2> 1199073 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint 
millis:0.0 result:{maxVersionSpecified=9223372036854775807, 
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, 
maxDoc=0}
   [junit4]   2> 1199073 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.u.PeerSync We are already in sync. 
No need to do a PeerSync 
   [junit4]   2> 1199073 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1199073 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 No 
uncommitted changes. Skipping IW.commit.
   [junit4]   2> 1199073 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 1199074 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy PeerSync stage of 
recovery was successful.
   [junit4]   2> 1199074 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Replaying updates 
buffered during PeerSync.
   [junit4]   2> 1199074 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy No replay needed.
   [junit4]   2> 1199074 INFO  
(recoveryExecutor-1831-thread-1-processing-n:127.0.0.1:33471_u x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33471_u c:collection1 
s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Registering as 
Active after recovery.
   [junit4]   2> 1199176 INFO  
(zkCallback-1833-thread-1-processing-n:127.0.0.1:33471_u) [n:127.0.0.1:33471_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 1199176 INFO  
(zkCallback-1827-thread-1-processing-n:127.0.0.1:34847_u) [n:127.0.0.1:34847_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 1199176 INFO  
(zkCallback-1821-thread-1-processing-n:127.0.0.1:34027_u) [n:127.0.0.1:34027_u  
  ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 1199954 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1
   [junit4]   2> 1199954 INFO  (SocketProxy-Acceptor-38525) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=33938,localport=38525], receiveBufferSize:531000
   [junit4]   2> 1199955 INFO  (SocketProxy-Acceptor-38525) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=35005,localport=60828], receiveBufferSize=530904
   [junit4]   2> 1199955 INFO  (qtp1159462928-10923) [n:127.0.0.1:38525_u 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.DirectUpdateHandler2 start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1199955 INFO  (qtp1159462928-10923) [n:127.0.0.1:38525_u 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 1199956 INFO  (qtp1159462928-10923) [n:127.0.0.1:38525_u 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 1199956 INFO  (qtp1159462928-10923) [n:127.0.0.1:38525_u 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp=/u path=/update 
params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
 0 0
   [junit4]   2> 1199956 INFO  (SocketProxy-Acceptor-34027) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=50868,localport=34027], receiveBufferSize:531000
   [junit4]   2> 1199957 INFO  (SocketProxy-Acceptor-34027) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=42197,localport=40326], receiveBufferSize=530904
   [junit4]   2> 1199958 INFO  (SocketProxy-Acceptor-34027) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=50876,localport=34027], receiveBufferSize:531000
   [junit4]   2> 1199958 INFO  (SocketProxy-Acceptor-34847) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=51780,localport=34847], receiveBufferSize:531000
   [junit4]   2> 1199958 INFO  (SocketProxy-Acceptor-33471) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=52158,localport=33471], receiveBufferSize:531000
   [junit4]   2> 1199959 INFO  (SocketProxy-Acceptor-34027) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=42197,localport=40334], receiveBufferSize=530904
   [junit4]   2> 1199959 INFO  (qtp704935349-10957) [n:127.0.0.1:34027_u 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 
start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1199959 INFO  (qtp704935349-10957) [n:127.0.0.1:34027_u 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 
No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 1199959 INFO  (SocketProxy-Acceptor-33471) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=33323,localport=55912], receiveBufferSize=530904
   [junit4]   2> 1199959 INFO  (qtp1321347135-11020) [n:127.0.0.1:33471_u 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1199960 INFO  (qtp1321347135-11020) [n:127.0.0.1:33471_u 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 1199960 INFO  (qtp1321347135-11020) [n:127.0.0.1:33471_u 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 1199960 INFO  (qtp1321347135-11020) [n:127.0.0.1:33471_u 
c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp=/u path=/update 
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:34027/u/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
 0 0
   [junit4]   2> 1199964 INFO  (qtp704935349-10957) [n:127.0.0.1:34027_u 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 1199964 INFO  (SocketProxy-Acceptor-34847) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=33699,localport=58166], receiveBufferSize=530904
   [junit4]   2> 1199964 INFO  (qtp704935349-10957) [n:127.0.0.1:34027_u 
c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp=/u path=/update 
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:34027/u/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
 0 5
   [junit4]   2> 1199964 INFO  (qtp684847818-10991) [n:127.0.0.1:34847_u 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 
start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1199964 INFO  (qtp684847818-10991) [n:127.0.0.1:34847_u 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 
No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 1199964 INFO  (qtp684847818-10991) [n:127.0.0.1:34847_u 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 1199964 INFO  (qtp684847818-10991) [n:127.0.0.1:34847_u 
c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp=/u path=/update 
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:34027/u/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
 0 0
   [junit4]   2> 1199964 INFO  (qtp704935349-10958) [n:127.0.0.1:34027_u 
c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp=/u path=/update 
params={_stateVer_=collection1:8&waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
 0 7
   [junit4]   2> 1199965 INFO  (SocketProxy-Acceptor-34027) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=50888,localport=34027], receiveBufferSize:531000
   [junit4]   2> 1199966 INFO  (qtp704935349-10956) [n:127.0.0.1:34027_u 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp=/u path=/select 
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
 hits=0 status=0 QTime=0
   [junit4]   2> 1199966 INFO  (SocketProxy-Acceptor-34027) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=42197,localport=40344], receiveBufferSize=530904
   [junit4]   2> 1199966 INFO  (SocketProxy-Acceptor-33471) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=52166,localport=33471], receiveBufferSize:531000
   [junit4]   2> 1199966 INFO  (SocketProxy-Acceptor-33471) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=33323,localport=55920], receiveBufferSize=530904
   [junit4]   2> 1199966 INFO  (qtp1321347135-11022) [n:127.0.0.1:33471_u 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp=/u path=/select 
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
 hits=0 status=0 QTime=0
   [junit4]   2> 1199967 INFO  (SocketProxy-Acceptor-34847) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=51800,localport=34847], receiveBufferSize:531000
   [junit4]   2> 1199967 INFO  (SocketProxy-Acceptor-34847) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=33699,localport=58182], receiveBufferSize=530904
   [junit4]   2> 1199968 INFO  (qtp684847818-10993) [n:127.0.0.1:34847_u 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp=/u path=/select par

[...truncated too long message...]

.c.ZkStateReader ZooKeeper watch triggered, but Solr cannot talk to ZK: 
[KeeperErrorCode = Session expired for /live_nodes]
   [junit4]   2> 1327170 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.AbstractConnector Stopped 
ServerConnector@5bc699d7{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
   [junit4]   2> 1327170 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.e.j.s.h.ContextHandler Stopped 
o.e.j.s.ServletContextHandler@1888f4e2{/u,null,UNAVAILABLE}
   [junit4]   2> 1327170 ERROR 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper 
server won't take any action on ERROR or SHUTDOWN server state changes
   [junit4]   2> 1327171 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.ZkTestServer connecting to 127.0.0.1:33611 33611
   [junit4]   2> 1327199 INFO  (Thread-2350) [    ] o.a.s.c.ZkTestServer 
connecting to 127.0.0.1:33611 33611
   [junit4]   2> 1327199 WARN  (Thread-2350) [    ] o.a.s.c.ZkTestServer Watch 
limit violations: 
   [junit4]   2> Maximum concurrent create/delete watches above limit:
   [junit4]   2> 
   [junit4]   2>        5       /solr/aliases.json
   [junit4]   2>        5       /solr/clusterprops.json
   [junit4]   2>        4       /solr/security.json
   [junit4]   2>        4       /solr/configs/conf1
   [junit4]   2> 
   [junit4]   2> Maximum concurrent data watches above limit:
   [junit4]   2> 
   [junit4]   2>        5       /solr/clusterstate.json
   [junit4]   2>        3       /solr/collections/c8n_1x3_lf/state.json
   [junit4]   2>        3       /solr/collections/collection1/state.json
   [junit4]   2>        2       
/solr/overseer_elect/election/98039635533955081-127.0.0.1:34027_u-n_0000000001
   [junit4]   2> 
   [junit4]   2> Maximum concurrent children watches above limit:
   [junit4]   2> 
   [junit4]   2>        5       /solr/live_nodes
   [junit4]   2>        5       /solr/collections
   [junit4]   2>        3       /solr/overseer/queue
   [junit4]   2>        3       /solr/overseer/collection-queue-work
   [junit4]   2> 
   [junit4]   2> 1327199 WARN  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.SocketProxy Closing 2 connections to: http://127.0.0.1:33471/u, target: 
http://127.0.0.1:33323/u
   [junit4]   2> 1327199 WARN  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.SocketProxy Closing 0 connections to: http://127.0.0.1:34847/u, target: 
http://127.0.0.1:33699/u
   [junit4]   2> 1327199 WARN  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.SocketProxy Closing 15 connections to: http://127.0.0.1:38525/u, 
target: http://127.0.0.1:35005/u
   [junit4]   2> 1327200 WARN  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30F4333592284485]) [    ] 
o.a.s.c.SocketProxy Closing 1 connections to: http://127.0.0.1:34027/u, target: 
http://127.0.0.1:42197/u
   [junit4]   2> NOTE: reproduce with: ant test  
-Dtestcase=LeaderFailoverAfterPartitionTest -Dtests.method=test 
-Dtests.seed=30F4333592284485 -Dtests.multiplier=3 -Dtests.slow=true 
-Dtests.locale=es-CO -Dtests.timezone=Europe/Kirov -Dtests.asserts=true 
-Dtests.file.encoding=ISO-8859-1
   [junit4] FAILURE  136s J2 | LeaderFailoverAfterPartitionTest.test <<<
   [junit4]    > Throwable #1: java.lang.AssertionError: Expected 2 of 3 
replicas to be active but only found 1; 
[core_node3:{"core":"c8n_1x3_lf_shard1_replica1","base_url":"http://127.0.0.1:38525/u","node_name":"127.0.0.1:38525_u","state":"active","leader":"true"}];
 clusterState: DocCollection(c8n_1x3_lf//collections/c8n_1x3_lf/state.json/17)={
   [junit4]    >   "replicationFactor":"3",
   [junit4]    >   "shards":{"shard1":{
   [junit4]    >       "range":"80000000-7fffffff",
   [junit4]    >       "state":"active",
   [junit4]    >       "replicas":{
   [junit4]    >         "core_node1":{
   [junit4]    >           "state":"down",
   [junit4]    >           "base_url":"http://127.0.0.1:34027/u";,
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica3",
   [junit4]    >           "node_name":"127.0.0.1:34027_u"},
   [junit4]    >         "core_node2":{
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica2",
   [junit4]    >           "base_url":"http://127.0.0.1:34847/u";,
   [junit4]    >           "node_name":"127.0.0.1:34847_u",
   [junit4]    >           "state":"down"},
   [junit4]    >         "core_node3":{
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica1",
   [junit4]    >           "base_url":"http://127.0.0.1:38525/u";,
   [junit4]    >           "node_name":"127.0.0.1:38525_u",
   [junit4]    >           "state":"active",
   [junit4]    >           "leader":"true"}}}},
   [junit4]    >   "router":{"name":"compositeId"},
   [junit4]    >   "maxShardsPerNode":"1",
   [junit4]    >   "autoAddReplicas":"false"}
   [junit4]    >        at 
__randomizedtesting.SeedInfo.seed([30F4333592284485:B8A00CEF3CD4297D]:0)
   [junit4]    >        at 
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:168)
   [junit4]    >        at 
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:55)
   [junit4]    >        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:992)
   [junit4]    >        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:967)
   [junit4]    >        at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 1327201 INFO  
(SUITE-LeaderFailoverAfterPartitionTest-seed#[30F4333592284485]-worker) [    ] 
o.a.s.SolrTestCaseJ4 ###deleteCore
   [junit4]   2> NOTE: leaving temporary files on disk at: 
/home/jenkins/workspace/Lucene-Solr-6.6-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30F4333592284485-001
   [junit4]   2> May 28, 2017 10:10:54 AM 
com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
   [junit4]   2> WARNING: Will linger awaiting termination of 1 leaked 
thread(s).
   [junit4]   2> NOTE: test params are: codec=Asserting(Lucene62): 
{multiDefault=BlockTreeOrds(blocksize=128), a_t=PostingsFormat(name=Asserting), 
id=PostingsFormat(name=Direct), text=PostingsFormat(name=MockRandom)}, 
docValues:{range_facet_l_dv=DocValuesFormat(name=Lucene54), 
_version_=DocValuesFormat(name=Asserting), 
intDefault=DocValuesFormat(name=Asserting), 
range_facet_i_dv=DocValuesFormat(name=Asserting), 
intDvoDefault=DocValuesFormat(name=Direct), 
range_facet_l=DocValuesFormat(name=Asserting), 
timestamp=DocValuesFormat(name=Asserting)}, maxPointsInLeafNode=258, 
maxMBSortInHeap=6.038177573931958, 
sim=RandomSimilarity(queryNorm=false,coord=yes): {}, locale=es-CO, 
timezone=Europe/Kirov
   [junit4]   2> NOTE: Linux 4.10.0-21-generic amd64/Oracle Corporation 
1.8.0_131 (64-bit)/cpus=8,threads=1,free=217110896,total=518979584
   [junit4]   2> NOTE: All tests run in this JVM: [TestPostingsSolrHighlighter, 
TestRestoreCore, TestSolrXml, FullHLLTest, TestDFRSimilarityFactory, 
FieldMutatingUpdateProcessorTest, TestAnalyzedSuggestions, 
DeleteLastCustomShardedReplicaTest, QueryResultKeyTest, 
XmlUpdateRequestHandlerTest, TestNRTOpen, SimpleFacetsTest, 
TestDistribDocBasedVersion, TestComplexPhraseQParserPlugin, 
TestTolerantUpdateProcessorRandomCloud, TestExpandComponent, 
TestSerializedLuceneMatchVersion, TestSimpleQParserPlugin, BJQParserTest, 
OverseerTest, TestSolrCoreParser, TestFieldCacheWithThreads, 
PropertiesRequestHandlerTest, DocValuesMultiTest, TestFieldCacheSort, 
PreAnalyzedFieldTest, TestExactStatsCache, JSONWriterTest, TestLFUCache, 
RollingRestartTest, TestUseDocValuesAsStored, ReplicaListTransformerTest, 
TestMergePolicyConfig, TestXmlQParserPlugin, TestFileDictionaryLookup, 
ZkStateWriterTest, TestRandomCollapseQParserPlugin, TestRecovery, 
SolrSlf4jReporterTest, PrimitiveFieldTypeTest, StressHdfsTest, 
SpellPossibilityIteratorTest, TestRequestStatusCollectionAPI, 
ManagedSchemaRoundRobinCloudTest, QueryParsingTest, BlockJoinFacetSimpleTest, 
ShowFileRequestHandlerTest, TestUtils, TestManagedResourceStorage, 
TestMultiWordSynonyms, TermVectorComponentDistributedTest, 
ParsingFieldUpdateProcessorsTest, NoCacheHeaderTest, CoreAdminHandlerTest, 
TestMinMaxOnMultiValuedField, RulesTest, 
StatelessScriptUpdateProcessorFactoryTest, TestInitQParser, ShardSplitTest, 
UnloadDistributedZkTest, TestReload, TestSolrDeletionPolicy1, 
TestClassNameShortening, TestCorePropertiesReload, TestStressUserVersions, 
TestGeoJSONResponseWriter, BasicAuthStandaloneTest, TestStressReorder, 
TestRebalanceLeaders, PeerSyncWithIndexFingerprintCachingTest, 
HttpPartitionTest, TestDelegationWithHadoopAuth, 
BigEndianAscendingWordSerializerTest, JavabinLoaderTest, 
VMParamsZkACLAndCredentialsProvidersTest, FieldAnalysisRequestHandlerTest, 
OpenExchangeRatesOrgProviderTest, TestCloudInspectUtil, 
DistributedVersionInfoTest, TestShortCircuitedRequests, 
LeaderFailureAfterFreshStartTest, DistributedSuggestComponentTest, 
TestCoreDiscovery, TestSlowCompositeReaderWrapper, TestBinaryResponseWriter, 
PrimUtilsTest, TestGroupingSearch, LukeRequestHandlerTest, 
TestMiniSolrCloudCluster, OverseerRolesTest, TestJsonRequest, BadComponentTest, 
TestEmbeddedSolrServerSchemaAPI, TestZkChroot, 
CloudExitableDirectoryReaderTest, TestTolerantSearch, DOMUtilTest, 
CreateCollectionCleanupTest, SolrXmlInZkTest, AnalysisErrorHandlingTest, 
TestRecoveryHdfs, TestTestInjection, TestDynamicFieldResource, TestBinaryField, 
TestNestedDocsSort, DirectoryFactoryTest, MultiThreadedOCPTest, 
StatsReloadRaceTest, TestUniqueKeyFieldResource, SparseHLLTest, 
TestSolrQueryParser, HttpSolrCallGetCoreTest, TestConfigSetImmutable, 
TestSchemaResource, TestLMJelinekMercerSimilarityFactory, CollectionReloadTest, 
CollectionTooManyReplicasTest, DistribCursorPagingTest, 
LeaderFailoverAfterPartitionTest]
   [junit4] Completed [524/711 (1!)] on J2 in 136.82s, 1 test, 1 failure <<< 
FAILURES!

[...truncated 44115 lines...]
---------------------------------------------------------------------
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org

Reply via email to