Build: https://jenkins.thetaphi.de/job/Lucene-Solr-6.x-Linux/2435/
Java: 32bit/jdk1.8.0_112 -server -XX:+UseSerialGC

1 tests failed.
FAILED:  org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test

Error Message:
Expected 2 of 3 replicas to be active but only found 1; 
[core_node2:{"core":"c8n_1x3_lf_shard1_replica3","base_url":"http://127.0.0.1:41352","node_name":"127.0.0.1:41352_","state":"active","leader":"true"}];
 clusterState: 
DocCollection(c8n_1x3_lf//collections/c8n_1x3_lf/state.json/17)={   
"replicationFactor":"3",   "shards":{"shard1":{       
"range":"80000000-7fffffff",       "state":"active",       "replicas":{         
"core_node1":{           "state":"down",           
"base_url":"http://127.0.0.1:36443";,           
"core":"c8n_1x3_lf_shard1_replica1",           "node_name":"127.0.0.1:36443_"}, 
        "core_node2":{           "core":"c8n_1x3_lf_shard1_replica3",           
"base_url":"http://127.0.0.1:41352";,           "node_name":"127.0.0.1:41352_",  
         "state":"active",           "leader":"true"},         "core_node3":{   
        "core":"c8n_1x3_lf_shard1_replica2",           
"base_url":"http://127.0.0.1:37726";,           "node_name":"127.0.0.1:37726_",  
         "state":"down"}}}},   "router":{"name":"compositeId"},   
"maxShardsPerNode":"1",   "autoAddReplicas":"false"}

Stack Trace:
java.lang.AssertionError: Expected 2 of 3 replicas to be active but only found 
1; 
[core_node2:{"core":"c8n_1x3_lf_shard1_replica3","base_url":"http://127.0.0.1:41352","node_name":"127.0.0.1:41352_","state":"active","leader":"true"}];
 clusterState: DocCollection(c8n_1x3_lf//collections/c8n_1x3_lf/state.json/17)={
  "replicationFactor":"3",
  "shards":{"shard1":{
      "range":"80000000-7fffffff",
      "state":"active",
      "replicas":{
        "core_node1":{
          "state":"down",
          "base_url":"http://127.0.0.1:36443";,
          "core":"c8n_1x3_lf_shard1_replica1",
          "node_name":"127.0.0.1:36443_"},
        "core_node2":{
          "core":"c8n_1x3_lf_shard1_replica3",
          "base_url":"http://127.0.0.1:41352";,
          "node_name":"127.0.0.1:41352_",
          "state":"active",
          "leader":"true"},
        "core_node3":{
          "core":"c8n_1x3_lf_shard1_replica2",
          "base_url":"http://127.0.0.1:37726";,
          "node_name":"127.0.0.1:37726_",
          "state":"down"}}}},
  "router":{"name":"compositeId"},
  "maxShardsPerNode":"1",
  "autoAddReplicas":"false"}
        at 
__randomizedtesting.SeedInfo.seed([4252C772B8A8F31F:CA06F8A816549EE7]:0)
        at org.junit.Assert.fail(Assert.java:93)
        at org.junit.Assert.assertTrue(Assert.java:43)
        at 
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:168)
        at 
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:55)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957)
        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:992)
        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:967)
        at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:811)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:462)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367)
        at java.lang.Thread.run(Thread.java:745)




Build Log:
[...truncated 11187 lines...]
   [junit4] Suite: org.apache.solr.cloud.LeaderFailoverAfterPartitionTest
   [junit4]   2> Creating dataDir: 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/init-core-data-001
   [junit4]   2> 212850 INFO  
(SUITE-LeaderFailoverAfterPartitionTest-seed#[4252C772B8A8F31F]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: 
@org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776)
   [junit4]   2> 212850 INFO  
(SUITE-LeaderFailoverAfterPartitionTest-seed#[4252C772B8A8F31F]-worker) [    ] 
o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
   [junit4]   2> 212854 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 212854 INFO  (Thread-577) [    ] o.a.s.c.ZkTestServer client 
port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 212854 INFO  (Thread-577) [    ] o.a.s.c.ZkTestServer Starting 
server
   [junit4]   2> 212954 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.ZkTestServer start zk server on port:34370
   [junit4]   2> 212961 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
 to /configs/conf1/solrconfig.xml
   [junit4]   2> 212962 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/schema.xml
 to /configs/conf1/schema.xml
   [junit4]   2> 212963 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
 to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 212964 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/stopwords.txt
 to /configs/conf1/stopwords.txt
   [junit4]   2> 212964 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/protwords.txt
 to /configs/conf1/protwords.txt
   [junit4]   2> 212965 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/currency.xml
 to /configs/conf1/currency.xml
   [junit4]   2> 212965 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml
 to /configs/conf1/enumsConfig.xml
   [junit4]   2> 212966 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json
 to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 212966 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt
 to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 212967 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt
 to /configs/conf1/old_synonyms.txt
   [junit4]   2> 212967 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
 to /configs/conf1/synonyms.txt
   [junit4]   2> 213038 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/control-001/cores/collection1
   [junit4]   2> 213039 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 213040 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@1209c76{/,null,AVAILABLE}
   [junit4]   2> 213042 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@fda947{HTTP/1.1,[http/1.1]}{127.0.0.1:44927}
   [junit4]   2> 213042 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.Server Started @215333ms
   [junit4]   2> 213042 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/tempDir-001/control/data,
 hostContext=/, hostPort=41352, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/control-001/cores}
   [junit4]   2> 213043 ERROR 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 213043 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
6.4.0
   [junit4]   2> 213043 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 213043 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 213043 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2016-12-16T13:20:49.003Z
   [junit4]   2> 213045 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 213045 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/control-001/solr.xml
   [junit4]   2> 213051 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 213051 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:34370/solr
   [junit4]   2> 213063 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:41352_    ] o.a.s.c.OverseerElectionContext I am going to be the 
leader 127.0.0.1:41352_
   [junit4]   2> 213063 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:41352_    ] o.a.s.c.Overseer Overseer 
(id=97117434597539844-127.0.0.1:41352_-n_0000000000) starting
   [junit4]   2> 213067 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:41352_    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:41352_
   [junit4]   2> 213069 INFO  
(zkCallback-424-thread-1-processing-n:127.0.0.1:41352_) [n:127.0.0.1:41352_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 213193 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:41352_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions 
underneath 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/control-001/cores
   [junit4]   2> 213193 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:41352_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 213195 INFO  
(OverseerStateUpdate-97117434597539844-127.0.0.1:41352_-n_0000000000) 
[n:127.0.0.1:41352_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard1
   [junit4]   2> 214214 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 6.4.0
   [junit4]   2> 214225 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=test
   [junit4]   2> 214325 WARN  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] o.a.s.s.IndexSchema 
[collection1] default search field in schema is text. WARNING: Deprecated, 
please use 'df' on request instead.
   [junit4]   2> 214329 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] o.a.s.s.IndexSchema 
Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 214336 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1' using configuration from collection 
control_collection
   [junit4]   2> 214336 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] o.a.s.c.SolrCore 
[[collection1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/control-001/cores/collection1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/control-001/cores/collection1/data/]
   [junit4]   2> 214336 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] 
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX 
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@441793
   [junit4]   2> 214338 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=22, maxMergeAtOnceExplicit=41, maxMergedSegmentMB=43.5400390625, 
floorSegmentMB=0.638671875, forceMergeDeletesPctAllowed=23.32787174353037, 
segmentsPerTier=49.0, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.8742144799803558
   [junit4]   2> 214343 WARN  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 214350 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] o.a.s.u.UpdateHandler 
Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 214350 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 214351 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] o.a.s.u.CommitTracker 
Hard AutoCommit: disabled
   [junit4]   2> 214351 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] o.a.s.u.CommitTracker 
Soft AutoCommit: disabled
   [junit4]   2> 214352 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: 
minMergeSize=1677721, mergeFactor=41, maxMergeSize=2147483648, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.0]
   [junit4]   2> 214353 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] 
o.a.s.s.SolrIndexSearcher Opening [Searcher@12f5016[collection1] main]
   [junit4]   2> 214354 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 214354 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 214355 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 214355 INFO  
(searcherExecutor-941-thread-1-processing-n:127.0.0.1:41352_ x:collection1 
c:control_collection) [n:127.0.0.1:41352_ c:control_collection   x:collection1] 
o.a.s.c.SolrCore [collection1] Registered new searcher 
Searcher@12f5016[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 214355 INFO  
(coreLoadExecutor-940-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_ c:control_collection   x:collection1] o.a.s.u.UpdateLog 
Could not find max version in index or recent updates, using new clock 
1553878955133501440
   [junit4]   2> 214360 INFO  
(coreZkRegister-933-thread-1-processing-n:127.0.0.1:41352_ x:collection1 
c:control_collection) [n:127.0.0.1:41352_ c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas 
found to continue.
   [junit4]   2> 214360 INFO  
(coreZkRegister-933-thread-1-processing-n:127.0.0.1:41352_ x:collection1 
c:control_collection) [n:127.0.0.1:41352_ c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new 
leader - try and sync
   [junit4]   2> 214360 INFO  
(coreZkRegister-933-thread-1-processing-n:127.0.0.1:41352_ x:collection1 
c:control_collection) [n:127.0.0.1:41352_ c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to 
http://127.0.0.1:41352/collection1/
   [junit4]   2> 214360 INFO  
(coreZkRegister-933-thread-1-processing-n:127.0.0.1:41352_ x:collection1 
c:control_collection) [n:127.0.0.1:41352_ c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync 
replicas to me
   [junit4]   2> 214360 INFO  
(coreZkRegister-933-thread-1-processing-n:127.0.0.1:41352_ x:collection1 
c:control_collection) [n:127.0.0.1:41352_ c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.SyncStrategy 
http://127.0.0.1:41352/collection1/ has no replicas
   [junit4]   2> 214363 INFO  
(coreZkRegister-933-thread-1-processing-n:127.0.0.1:41352_ x:collection1 
c:control_collection) [n:127.0.0.1:41352_ c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new 
leader: http://127.0.0.1:41352/collection1/ shard1
   [junit4]   2> 214513 INFO  
(coreZkRegister-933-thread-1-processing-n:127.0.0.1:41352_ x:collection1 
c:control_collection) [n:127.0.0.1:41352_ c:control_collection s:shard1 
r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no recovery 
necessary
   [junit4]   2> 214698 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 214698 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:34370/solr ready
   [junit4]   2> 214698 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection 
loss:false
   [junit4]   2> 214698 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase Creating collection1 with stateFormat=2
   [junit4]   2> 214779 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-1-001/cores/collection1
   [junit4]   2> 214779 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-1-001
   [junit4]   2> 214780 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 214781 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@163105d{/,null,AVAILABLE}
   [junit4]   2> 214781 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@13af820{HTTP/1.1,[http/1.1]}{127.0.0.1:34353}
   [junit4]   2> 214781 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.Server Started @217072ms
   [junit4]   2> 214781 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/tempDir-001/jetty1,
 solrconfig=solrconfig.xml, hostContext=/, hostPort=37726, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-1-001/cores}
   [junit4]   2> 214781 ERROR 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 214782 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
6.4.0
   [junit4]   2> 214782 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 214782 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 214782 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2016-12-16T13:20:50.742Z
   [junit4]   2> 214785 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 214785 WARN  (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [    ] 
o.a.z.s.NIOServerCnxn caught end of stream exception
   [junit4]   2> EndOfStreamException: Unable to read additional data from 
client sessionid 0x15907caceaf0007, likely client has closed socket
   [junit4]   2>        at 
org.apache.zookeeper.server.NIOServerCnxn.doIO(NIOServerCnxn.java:228)
   [junit4]   2>        at 
org.apache.zookeeper.server.NIOServerCnxnFactory.run(NIOServerCnxnFactory.java:208)
   [junit4]   2>        at java.lang.Thread.run(Thread.java:745)
   [junit4]   2> 214785 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-1-001/solr.xml
   [junit4]   2> 214789 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 214789 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:34370/solr
   [junit4]   2> 214811 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:37726_    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (1)
   [junit4]   2> 214813 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:37726_    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:37726_
   [junit4]   2> 214813 INFO  (zkCallback-428-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 214814 INFO  
(zkCallback-424-thread-1-processing-n:127.0.0.1:41352_) [n:127.0.0.1:41352_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 214815 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 214945 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:37726_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions 
underneath 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-1-001/cores
   [junit4]   2> 214945 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:37726_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 214946 INFO  
(OverseerStateUpdate-97117434597539844-127.0.0.1:41352_-n_0000000000) 
[n:127.0.0.1:41352_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard2
   [junit4]   2> 215048 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [2])
   [junit4]   2> 215955 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using 
Lucene MatchVersion: 6.4.0
   [junit4]   2> 215980 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=test
   [junit4]   2> 216058 WARN  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] default search field in schema is text. WARNING: Deprecated, 
please use 'df' on request instead.
   [junit4]   2> 216060 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded 
schema test/1.0 with uniqueid field id
   [junit4]   2> 216067 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 216067 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.c.SolrCore 
[[collection1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-1-001/cores/collection1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-1-001/cores/collection1/data/]
   [junit4]   2> 216068 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.c.JmxMonitoredMap JMX 
monitoring is enabled. Adding Solr mbeans to JMX Server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@441793
   [junit4]   2> 216069 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy 
RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: 
[TieredMergePolicy: maxMergeAtOnce=22, maxMergeAtOnceExplicit=41, 
maxMergedSegmentMB=43.5400390625, floorSegmentMB=0.638671875, 
forceMergeDeletesPctAllowed=23.32787174353037, segmentsPerTier=49.0, 
maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.8742144799803558
   [junit4]   2> 216072 WARN  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.c.RequestHandlers 
INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class 
= DumpRequestHandler,args = {defaults={a=A,b=B}}}
   [junit4]   2> 216079 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.u.UpdateHandler Using 
UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 216079 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 216080 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.u.CommitTracker Hard 
AutoCommit: disabled
   [junit4]   2> 216080 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.u.CommitTracker Soft 
AutoCommit: disabled
   [junit4]   2> 216081 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy 
RandomMergePolicy wrapping class 
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: 
minMergeSize=1677721, mergeFactor=41, maxMergeSize=2147483648, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.0]
   [junit4]   2> 216081 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.s.SolrIndexSearcher 
Opening [Searcher@7b7903[collection1] main]
   [junit4]   2> 216082 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 216082 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 216082 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.h.ReplicationHandler 
Commits will be reserved for  10000
   [junit4]   2> 216083 INFO  
(searcherExecutor-952-thread-1-processing-n:127.0.0.1:37726_ x:collection1 
c:collection1) [n:127.0.0.1:37726_ c:collection1   x:collection1] 
o.a.s.c.SolrCore [collection1] Registered new searcher 
Searcher@7b7903[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 216083 INFO  
(coreLoadExecutor-951-thread-1-processing-n:127.0.0.1:37726_) 
[n:127.0.0.1:37726_ c:collection1   x:collection1] o.a.s.u.UpdateLog Could not 
find max version in index or recent updates, using new clock 1553878956945440768
   [junit4]   2> 216087 INFO  
(coreZkRegister-946-thread-1-processing-n:127.0.0.1:37726_ x:collection1 
c:collection1) [n:127.0.0.1:37726_ c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to 
continue.
   [junit4]   2> 216087 INFO  
(coreZkRegister-946-thread-1-processing-n:127.0.0.1:37726_ x:collection1 
c:collection1) [n:127.0.0.1:37726_ c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try 
and sync
   [junit4]   2> 216087 INFO  
(coreZkRegister-946-thread-1-processing-n:127.0.0.1:37726_ x:collection1 
c:collection1) [n:127.0.0.1:37726_ c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.SyncStrategy Sync replicas to 
http://127.0.0.1:37726/collection1/
   [junit4]   2> 216087 INFO  
(coreZkRegister-946-thread-1-processing-n:127.0.0.1:37726_ x:collection1 
c:collection1) [n:127.0.0.1:37726_ c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 216087 INFO  
(coreZkRegister-946-thread-1-processing-n:127.0.0.1:37726_ x:collection1 
c:collection1) [n:127.0.0.1:37726_ c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:37726/collection1/ has no 
replicas
   [junit4]   2> 216089 INFO  
(coreZkRegister-946-thread-1-processing-n:127.0.0.1:37726_ x:collection1 
c:collection1) [n:127.0.0.1:37726_ c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: 
http://127.0.0.1:37726/collection1/ shard2
   [junit4]   2> 216190 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [2])
   [junit4]   2> 216241 INFO  
(coreZkRegister-946-thread-1-processing-n:127.0.0.1:37726_ x:collection1 
c:collection1) [n:127.0.0.1:37726_ c:collection1 s:shard2 r:core_node1 
x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 216343 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [2])
   [junit4]   2> 216518 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-2-001/cores/collection1
   [junit4]   2> 216519 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-2-001
   [junit4]   2> 216519 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 216531 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@1527dc4{/,null,AVAILABLE}
   [junit4]   2> 216532 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@147ab9b{HTTP/1.1,[http/1.1]}{127.0.0.1:36082}
   [junit4]   2> 216532 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.Server Started @218823ms
   [junit4]   2> 216532 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/tempDir-001/jetty2,
 solrconfig=solrconfig.xml, hostContext=/, hostPort=42767, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-2-001/cores}
   [junit4]   2> 216532 ERROR 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 216532 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr? version 
6.4.0
   [junit4]   2> 216532 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 216532 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 216533 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2016-12-16T13:20:52.492Z
   [junit4]   2> 216535 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 216535 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-2-001/solr.xml
   [junit4]   2> 216543 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 216543 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:34370/solr
   [junit4]   2> 216550 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:42767_    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (2)
   [junit4]   2> 216552 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:42767_    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:42767_
   [junit4]   2> 216553 INFO  (zkCallback-428-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 216553 INFO  
(zkCallback-424-thread-2-processing-n:127.0.0.1:41352_) [n:127.0.0.1:41352_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 216553 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 216554 INFO  
(zkCallback-440-thread-1-processing-n:127.0.0.1:42767_) [n:127.0.0.1:42767_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 216609 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:42767_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions 
underneath 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-2-001/cores
   [junit4]   2> 216609 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:42767_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 216610 INFO  
(OverseerStateUpdate-97117434597539844-127.0.0.1:41352_-n_0000000000) 
[n:127.0.0.1:41352_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard1
   [junit4]   2> 216712 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 216712 INFO  
(zkCallback-440-thread-2-processing-n:127.0.0.1:42767_) [n:127.0.0.1:42767_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 217620 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using 
Lucene MatchVersion: 6.4.0
   [junit4]   2> 217646 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=test
   [junit4]   2> 217956 WARN  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] default search field in schema is text. WARNING: Deprecated, 
please use 'df' on request instead.
   [junit4]   2> 217958 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded 
schema test/1.0 with uniqueid field id
   [junit4]   2> 217967 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 217968 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.c.SolrCore 
[[collection1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-2-001/cores/collection1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-2-001/cores/collection1/data/]
   [junit4]   2> 217968 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.c.JmxMonitoredMap JMX 
monitoring is enabled. Adding Solr mbeans to JMX Server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@441793
   [junit4]   2> 217969 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy 
RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: 
[TieredMergePolicy: maxMergeAtOnce=22, maxMergeAtOnceExplicit=41, 
maxMergedSegmentMB=43.5400390625, floorSegmentMB=0.638671875, 
forceMergeDeletesPctAllowed=23.32787174353037, segmentsPerTier=49.0, 
maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.8742144799803558
   [junit4]   2> 217974 WARN  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.c.RequestHandlers 
INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class 
= DumpRequestHandler,args = {defaults={a=A,b=B}}}
   [junit4]   2> 217985 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.u.UpdateHandler Using 
UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 217985 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 217985 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.u.CommitTracker Hard 
AutoCommit: disabled
   [junit4]   2> 217985 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.u.CommitTracker Soft 
AutoCommit: disabled
   [junit4]   2> 217986 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy 
RandomMergePolicy wrapping class 
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: 
minMergeSize=1677721, mergeFactor=41, maxMergeSize=2147483648, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.0]
   [junit4]   2> 217986 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.s.SolrIndexSearcher 
Opening [Searcher@729caa[collection1] main]
   [junit4]   2> 217987 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 217988 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 217989 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.h.ReplicationHandler 
Commits will be reserved for  10000
   [junit4]   2> 217989 INFO  
(searcherExecutor-963-thread-1-processing-n:127.0.0.1:42767_ x:collection1 
c:collection1) [n:127.0.0.1:42767_ c:collection1   x:collection1] 
o.a.s.c.SolrCore [collection1] Registered new searcher 
Searcher@729caa[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 217990 INFO  
(coreLoadExecutor-962-thread-1-processing-n:127.0.0.1:42767_) 
[n:127.0.0.1:42767_ c:collection1   x:collection1] o.a.s.u.UpdateLog Could not 
find max version in index or recent updates, using new clock 1553878958945075200
   [junit4]   2> 217994 INFO  
(coreZkRegister-957-thread-1-processing-n:127.0.0.1:42767_ x:collection1 
c:collection1) [n:127.0.0.1:42767_ c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to 
continue.
   [junit4]   2> 217994 INFO  
(coreZkRegister-957-thread-1-processing-n:127.0.0.1:42767_ x:collection1 
c:collection1) [n:127.0.0.1:42767_ c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try 
and sync
   [junit4]   2> 217994 INFO  
(coreZkRegister-957-thread-1-processing-n:127.0.0.1:42767_ x:collection1 
c:collection1) [n:127.0.0.1:42767_ c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.SyncStrategy Sync replicas to 
http://127.0.0.1:42767/collection1/
   [junit4]   2> 217994 INFO  
(coreZkRegister-957-thread-1-processing-n:127.0.0.1:42767_ x:collection1 
c:collection1) [n:127.0.0.1:42767_ c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 217994 INFO  
(coreZkRegister-957-thread-1-processing-n:127.0.0.1:42767_ x:collection1 
c:collection1) [n:127.0.0.1:42767_ c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:42767/collection1/ has no 
replicas
   [junit4]   2> 217995 INFO  
(coreZkRegister-957-thread-1-processing-n:127.0.0.1:42767_ x:collection1 
c:collection1) [n:127.0.0.1:42767_ c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: 
http://127.0.0.1:42767/collection1/ shard1
   [junit4]   2> 218097 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 218097 INFO  
(zkCallback-440-thread-2-processing-n:127.0.0.1:42767_) [n:127.0.0.1:42767_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 218147 INFO  
(coreZkRegister-957-thread-1-processing-n:127.0.0.1:42767_ x:collection1 
c:collection1) [n:127.0.0.1:42767_ c:collection1 s:shard1 r:core_node2 
x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 218183 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-3-001/cores/collection1
   [junit4]   2> 218183 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-3-001
   [junit4]   2> 218184 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 218185 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@1a09e26{/,null,AVAILABLE}
   [junit4]   2> 218185 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@14a5fdb{HTTP/1.1,[http/1.1]}{127.0.0.1:38291}
   [junit4]   2> 218185 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.Server Started @220476ms
   [junit4]   2> 218185 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/tempDir-001/jetty3,
 solrconfig=solrconfig.xml, hostContext=/, hostPort=36443, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-3-001/cores}
   [junit4]   2> 218186 ERROR 
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 218186 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr? version 
6.4.0
   [junit4]   2> 218186 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 218186 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 218186 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2016-12-16T13:20:54.146Z
   [junit4]   2> 218188 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 218188 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-3-001/solr.xml
   [junit4]   2> 218203 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 218204 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:34370/solr
   [junit4]   2> 218209 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:36443_    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (3)
   [junit4]   2> 218211 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:36443_    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:36443_
   [junit4]   2> 218212 INFO  
(zkCallback-424-thread-1-processing-n:127.0.0.1:41352_) [n:127.0.0.1:41352_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 218212 INFO  (zkCallback-428-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 218212 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 218213 INFO  
(zkCallback-446-thread-1-processing-n:127.0.0.1:36443_) [n:127.0.0.1:36443_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 218213 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 218213 INFO  
(zkCallback-440-thread-1-processing-n:127.0.0.1:42767_) [n:127.0.0.1:42767_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 218216 INFO  
(zkCallback-440-thread-2-processing-n:127.0.0.1:42767_) [n:127.0.0.1:42767_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 218298 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:36443_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions 
underneath 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-3-001/cores
   [junit4]   2> 218298 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) 
[n:127.0.0.1:36443_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 218300 INFO  
(OverseerStateUpdate-97117434597539844-127.0.0.1:41352_-n_0000000000) 
[n:127.0.0.1:41352_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard2
   [junit4]   2> 218402 WARN  
(OverseerStateUpdate-97117434597539844-127.0.0.1:41352_-n_0000000000) 
[n:127.0.0.1:41352_    ] o.a.s.c.Overseer Bad version writing to ZK using 
compare-and-set, will force refresh cluster state: KeeperErrorCode = BadVersion 
for /collections/collection1/state.json
   [junit4]   2> 218425 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 218425 INFO  
(zkCallback-446-thread-1-processing-n:127.0.0.1:36443_) [n:127.0.0.1:36443_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 218425 INFO  
(zkCallback-440-thread-2-processing-n:127.0.0.1:42767_) [n:127.0.0.1:42767_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 218425 INFO  
(OverseerStateUpdate-97117434597539844-127.0.0.1:41352_-n_0000000000) 
[n:127.0.0.1:41352_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard2
   [junit4]   2> 218426 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 218426 INFO  
(zkCallback-440-thread-2-processing-n:127.0.0.1:42767_) [n:127.0.0.1:42767_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 218426 INFO  
(zkCallback-446-thread-1-processing-n:127.0.0.1:36443_) [n:127.0.0.1:36443_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 219309 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using 
Lucene MatchVersion: 6.4.0
   [junit4]   2> 219318 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=test
   [junit4]   2> 219391 WARN  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] default search field in schema is text. WARNING: Deprecated, 
please use 'df' on request instead.
   [junit4]   2> 219393 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded 
schema test/1.0 with uniqueid field id
   [junit4]   2> 219399 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 219399 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.c.SolrCore 
[[collection1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-3-001/cores/collection1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001/shard-3-001/cores/collection1/data/]
   [junit4]   2> 219399 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.c.JmxMonitoredMap JMX 
monitoring is enabled. Adding Solr mbeans to JMX Server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@441793
   [junit4]   2> 219400 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy 
RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: 
[TieredMergePolicy: maxMergeAtOnce=22, maxMergeAtOnceExplicit=41, 
maxMergedSegmentMB=43.5400390625, floorSegmentMB=0.638671875, 
forceMergeDeletesPctAllowed=23.32787174353037, segmentsPerTier=49.0, 
maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.8742144799803558
   [junit4]   2> 219403 WARN  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.c.RequestHandlers 
INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class 
= DumpRequestHandler,args = {defaults={a=A,b=B}}}
   [junit4]   2> 219412 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.u.UpdateHandler Using 
UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 219412 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 219412 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.u.CommitTracker Hard 
AutoCommit: disabled
   [junit4]   2> 219412 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.u.CommitTracker Soft 
AutoCommit: disabled
   [junit4]   2> 219413 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy 
RandomMergePolicy wrapping class 
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: 
minMergeSize=1677721, mergeFactor=41, maxMergeSize=2147483648, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.0]
   [junit4]   2> 219413 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.s.SolrIndexSearcher 
Opening [Searcher@9d72c2[collection1] main]
   [junit4]   2> 219414 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 219414 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 219414 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.h.ReplicationHandler 
Commits will be reserved for  10000
   [junit4]   2> 219415 INFO  
(searcherExecutor-974-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
c:collection1) [n:127.0.0.1:36443_ c:collection1   x:collection1] 
o.a.s.c.SolrCore [collection1] Registered new searcher 
Searcher@9d72c2[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 219415 INFO  
(coreLoadExecutor-973-thread-1-processing-n:127.0.0.1:36443_) 
[n:127.0.0.1:36443_ c:collection1   x:collection1] o.a.s.u.UpdateLog Could not 
find max version in index or recent updates, using new clock 1553878960439296000
   [junit4]   2> 219418 INFO  
(coreZkRegister-968-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
c:collection1) [n:127.0.0.1:36443_ c:collection1 s:shard2 r:core_node3 
x:collection1] o.a.s.c.ZkController Core needs to recover:collection1
   [junit4]   2> 219419 INFO  
(updateExecutor-443-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.DefaultSolrCoreState Running recovery
   [junit4]   2> 219419 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Starting recovery process. 
recoveringAfterStartup=true
   [junit4]   2> 219419 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy ###### startupVersions=[[]]
   [junit4]   2> 219419 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Begin buffering updates. 
core=[collection1]
   [junit4]   2> 219419 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.UpdateLog Starting to buffer updates. 
FSUpdateLog{state=ACTIVE, tlog=null}
   [junit4]   2> 219419 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Publishing state of core 
[collection1] as recovering, leader is [http://127.0.0.1:37726/collection1/] 
and I am [http://127.0.0.1:36443/collection1/]
   [junit4]   2> 219421 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Sending prep recovery 
command to [http://127.0.0.1:37726]; [WaitForState: 
action=PREPRECOVERY&core=collection1&nodeName=127.0.0.1:36443_&coreNodeName=core_node3&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
   [junit4]   2> 219424 INFO  (SocketProxy-Acceptor-37726) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=38652,localport=37726], receiveBufferSize:531000
   [junit4]   2> 219426 INFO  (SocketProxy-Acceptor-37726) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=34353,localport=54216], receiveBufferSize=530904
   [junit4]   2> 219427 INFO  (qtp6871875-2261) [n:127.0.0.1:37726_    ] 
o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node3, state: 
recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true
   [junit4]   2> 219428 INFO  (qtp6871875-2261) [n:127.0.0.1:37726_    ] 
o.a.s.h.a.PrepRecoveryOp Will wait a max of 183 seconds to see collection1 
(shard2 of collection1) have state: recovering
   [junit4]   2> 219428 INFO  (qtp6871875-2261) [n:127.0.0.1:37726_    ] 
o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, 
shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? 
true, live=true, checkLive=true, currentState=down, localState=active, 
nodeName=127.0.0.1:36443_, coreNodeName=core_node3, 
onlyIfActiveCheckResult=false, nodeProps: 
core_node3:{"core":"collection1","base_url":"http://127.0.0.1:36443","node_name":"127.0.0.1:36443_","state":"down"}
   [junit4]   2> 219522 INFO  
(zkCallback-440-thread-2-processing-n:127.0.0.1:42767_) [n:127.0.0.1:42767_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 219522 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 219522 INFO  
(zkCallback-446-thread-1-processing-n:127.0.0.1:36443_) [n:127.0.0.1:36443_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 219801 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 219801 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase Wait for recoveries to finish - wait 
30000 for each attempt
   [junit4]   2> 219801 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: 
collection1 failOnTimeout:true timeout (sec):30000
   [junit4]   2> 220428 INFO  (qtp6871875-2261) [n:127.0.0.1:37726_    ] 
o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, 
shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? 
true, live=true, checkLive=true, currentState=recovering, localState=active, 
nodeName=127.0.0.1:36443_, coreNodeName=core_node3, 
onlyIfActiveCheckResult=false, nodeProps: 
core_node3:{"core":"collection1","base_url":"http://127.0.0.1:36443","node_name":"127.0.0.1:36443_","state":"recovering"}
   [junit4]   2> 220429 INFO  (qtp6871875-2261) [n:127.0.0.1:37726_    ] 
o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node3, state: recovering, 
checkLive: true, onlyIfLeader: true for: 1 seconds.
   [junit4]   2> 220429 INFO  (qtp6871875-2261) [n:127.0.0.1:37726_    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores 
params={nodeName=127.0.0.1:36443_&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node3&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
 status=0 QTime=1001
   [junit4]   2> 227430 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Attempting to PeerSync 
from [http://127.0.0.1:37726/collection1/] - recoveringAfterStartup=[true]
   [junit4]   2> 227430 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.PeerSync PeerSync: core=collection1 
url=http://127.0.0.1:36443 START replicas=[http://127.0.0.1:37726/collection1/] 
nUpdates=100
   [junit4]   2> 227432 INFO  (SocketProxy-Acceptor-37726) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=38686,localport=37726], receiveBufferSize:531000
   [junit4]   2> 227432 INFO  (SocketProxy-Acceptor-37726) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=34353,localport=54250], receiveBufferSize=530904
   [junit4]   2> 227436 INFO  (qtp6871875-2261) [n:127.0.0.1:37726_ 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.IndexFingerprint 
IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, 
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, 
maxDoc=0}
   [junit4]   2> 227436 INFO  (qtp6871875-2261) [n:127.0.0.1:37726_ 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp= path=/get 
params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2}
 status=0 QTime=1
   [junit4]   2> 227437 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint 
millis:0.0 result:{maxVersionSpecified=9223372036854775807, 
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, 
maxDoc=0}
   [junit4]   2> 227437 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.PeerSync We are already in sync. No need to 
do a PeerSync 
   [junit4]   2> 227437 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 227437 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted 
changes. Skipping IW.commit.
   [junit4]   2> 227437 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 227437 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy PeerSync stage of recovery 
was successful.
   [junit4]   2> 227437 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Replaying updates buffered 
during PeerSync.
   [junit4]   2> 227437 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy No replay needed.
   [junit4]   2> 227437 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:36443_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Registering as Active 
after recovery.
   [junit4]   2> 227439 INFO  
(zkCallback-446-thread-1-processing-n:127.0.0.1:36443_) [n:127.0.0.1:36443_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 227439 INFO  
(zkCallback-434-thread-1-processing-n:127.0.0.1:37726_) [n:127.0.0.1:37726_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 227439 INFO  
(zkCallback-440-thread-2-processing-n:127.0.0.1:42767_) [n:127.0.0.1:42767_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 227804 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1
   [junit4]   2> 227805 INFO  (SocketProxy-Acceptor-41352) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=59492,localport=41352], receiveBufferSize:531000
   [junit4]   2> 227806 INFO  (SocketProxy-Acceptor-41352) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=44927,localport=55352], receiveBufferSize=530904
   [junit4]   2> 227807 INFO  (qtp4042978-2223) [n:127.0.0.1:41352_ 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.DirectUpdateHandler2 start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 227807 INFO  (qtp4042978-2223) [n:127.0.0.1:41352_ 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 227808 INFO  (qtp4042978-2223) [n:127.0.0.1:41352_ 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 227808 INFO  (qtp4042978-2223) [n:127.0.0.1:41352_ 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update 
params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
 0 0
   [junit4]   2> 227809 INFO  (SocketProxy-Acceptor-37726) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=38700,localport=37726], receiveBufferSize:531000
   [junit4]   2> 227810 INFO  (SocketProxy-Acceptor-37726) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=34353,localport=54264], receiveBufferSize=530904
   [junit4]   2> 227813 INFO  (SocketProxy-Acceptor-37726) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=38704,localport=37726], receiveBufferSize:531000
   [junit4]   2> 227813 INFO  (SocketProxy-Acceptor-42767) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=55700,localport=42767], receiveBufferSize:531000
   [junit4]   2> 227814 INFO  (SocketProxy-Acceptor-37726) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=34353,localport=54272], receiveBufferSize=530904
   [junit4]   2> 227814 INFO  (SocketProxy-Acceptor-42767) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=36082,localport=39566], receiveBufferSize=530904
   [junit4]   2> 227816 INFO  (SocketProxy-Acceptor-36443) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=35684,localport=36443], receiveBufferSize:531000
   [junit4]   2> 227816 INFO  (SocketProxy-Acceptor-36443) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=38291,localport=42802], receiveBufferSize=530904
   [junit4]   2> 227818 INFO  (qtp26838668-2291) [n:127.0.0.1:42767_ 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 
start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 227818 INFO  (qtp6871875-2259) [n:127.0.0.1:37726_ 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 
start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 227819 INFO  (qtp26838668-2291) [n:127.0.0.1:42767_ 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 
No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 227819 INFO  (qtp16395179-2322) [n:127.0.0.1:36443_ 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 227819 INFO  (qtp6871875-2259) [n:127.0.0.1:37726_ 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 
No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 227819 INFO  (qtp26838668-2291) [n:127.0.0.1:42767_ 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 227820 INFO  (qtp16395179-2322) [n:127.0.0.1:36443_ 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 227820 INFO  (qtp26838668-2291) [n:127.0.0.1:42767_ 
c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update 
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:37726/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
 0 2
   [junit4]   2> 227820 INFO  (qtp6871875-2259) [n:127.0.0.1:37726_ 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 227821 INFO  (qtp16395179-2322) [n:127.0.0.1:36443_ 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 227821 INFO  (qtp6871875-2259) [n:127.0.0.1:37726_ 
c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update 
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:37726/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
 0 2
   [junit4]   2> 227821 INFO  (qtp16395179-2322) [n:127.0.0.1:36443_ 
c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update 
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:37726/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
 0 2
   [junit4]   2> 227821 INFO  (qtp6871875-2262) [n:127.0.0.1:37726_ 
c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update 
params={_stateVer_=collection1:11&waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
 0 10
   [junit4]   2> 227825 INFO  (SocketProxy-Acceptor-37726) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=38716,localport=37726], receiveBufferSize:531000
   [junit4]   2> 227825 INFO  (SocketProxy-Acceptor-37726) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=34353,localport=54280], receiveBufferSize=530904
   [junit4]   2> 227827 INFO  (qtp6871875-2261) [n:127.0.0.1:37726_ 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp= path=/select 
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
 hits=0 status=0 QTime=0
   [junit4]   2> 227829 INFO  (SocketProxy-Acceptor-36443) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=35698,localport=36443], receiveBufferSize:531000
   [junit4]   2> 227829 INFO  (SocketProxy-Acceptor-36443) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=38291,localport=42810], receiveBufferSize=530904
   [junit4]   2> 227831 INFO  (qtp16395179-2324) [n:127.0.0.1:36443_ 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp= path=/select 
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
 hits=0 status=0 QTime=0
   [junit4]   2> 227832 INFO  (SocketProxy-Acceptor-42767) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=55716,localport=42767], receiveBufferSize:531000
   [junit4]   2> 227833 INFO  (SocketProxy-Acceptor-42767) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=36082,localport=39580], receiveBufferSize=530904
   [junit4]   2> 227834 INFO  (qtp26838668-2293) [n:127.0.0.1:42767_ 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp= path=/select 
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
 hits=0 status=0 QTime=0
   [junit4]   2> 229835 INFO  (SocketProxy-Acceptor-42767) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=55734,localport=42767], receiveBufferSize:531000
   [junit4]   2> 229836 INFO  (SocketProxy-Acceptor-42767) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=36082,localport=39598], receiveBufferSize=530904
   [junit4]   2> 229838 INFO  (qtp26838668-2291) [n:127.0.0.1:42767_    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params 
replicationFactor=3&maxShardsPerNode=1&name=c8n_1x3_lf&action=CREATE&numShards=1&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 229856 INFO  
(OverseerThreadFactory-938-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_    ] o.a.s.c.CreateCollectionCmd Create collection 
c8n_1x3_lf
   [junit4]   2> 229857 INFO  
(OverseerThreadFactory-938-thread-1-processing-n:127.0.0.1:41352_) 
[n:127.0.0.1:41352_    ] o.a.s.c.CreateCollectionCmd Only one config set found 
in zk - using it:conf1
   [junit4]   2> 229960 INFO  (SocketProxy-Acceptor-36443) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=35724,localport=36443], receiveBufferSize:531000
   [junit4]   2> 229961 INFO  (SocketProxy-Acceptor-36443) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=38291,localport=42836], receiveBufferSize=530904
   [junit4]   2> 229963 INFO  (SocketProxy-Acceptor-41352) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=59546,localport=41352], receiveBufferSize:531000
   [junit4]   2> 229963 INFO  (SocketProxy-Acceptor-37726) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=38752,localport=37726], receiveBufferSize:531000
   [junit4]   2> 229963 INFO  (qtp16395179-2322) [n:127.0.0.1:36443_    ] 
o.a.s.h.a.CoreAdminOperation core create command 
qt=/admin/cores&collection.configName=conf1&new

[...truncated too long message...]

teExecutor-443-thread-2-processing-n:127.0.0.1:36443_) [n:127.0.0.1:36443_ 
c:c8n_1x3_lf s:shard1 r:core_node1 x:c8n_1x3_lf_shard1_replica1] 
o.a.s.u.DefaultSolrCoreState Skipping recovery because Solr is shutdown
   [junit4]   2> 324682 INFO  
(zkCallback-446-thread-6-processing-n:127.0.0.1:36443_) [n:127.0.0.1:36443_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [1])
   [junit4]   2> 327524 INFO  
(zkCallback-446-thread-4-processing-n:127.0.0.1:36443_) [n:127.0.0.1:36443_    
] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/c8n_1x3_lf/state.json] for collection [c8n_1x3_lf] has 
occurred - updating... (live nodes size: [1])
   [junit4]   2> 327526 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ 
x:c8n_1x3_lf_shard1_replica1 s:shard1 c:c8n_1x3_lf r:core_node1) 
[n:127.0.0.1:36443_ c:c8n_1x3_lf s:shard1 r:core_node1 
x:c8n_1x3_lf_shard1_replica1] o.a.s.c.RecoveryStrategy RecoveryStrategy has 
been closed
   [junit4]   2> 327526 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ 
x:c8n_1x3_lf_shard1_replica1 s:shard1 c:c8n_1x3_lf r:core_node1) 
[n:127.0.0.1:36443_ c:c8n_1x3_lf s:shard1 r:core_node1 
x:c8n_1x3_lf_shard1_replica1] o.a.s.c.RecoveryStrategy Finished recovery 
process, successful=[false]
   [junit4]   2> 327526 INFO  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ 
x:c8n_1x3_lf_shard1_replica1 s:shard1 c:c8n_1x3_lf r:core_node1) 
[n:127.0.0.1:36443_ c:c8n_1x3_lf s:shard1 r:core_node1 
x:c8n_1x3_lf_shard1_replica1] o.a.s.c.SolrCore [c8n_1x3_lf_shard1_replica1]  
CLOSING SolrCore org.apache.solr.core.SolrCore@17b0e08
   [junit4]   2> 327526 WARN  
(recoveryExecutor-444-thread-1-processing-n:127.0.0.1:36443_ 
x:c8n_1x3_lf_shard1_replica1 s:shard1 c:c8n_1x3_lf r:core_node1) 
[n:127.0.0.1:36443_ c:c8n_1x3_lf s:shard1 r:core_node1 
x:c8n_1x3_lf_shard1_replica1] o.a.s.c.RecoveryStrategy Stopping recovery for 
core=[c8n_1x3_lf_shard1_replica1] coreNodeName=[core_node1]
   [junit4]   2> 327536 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.Overseer Overseer (id=97117434597539855-127.0.0.1:36443_-n_0000000003) 
closing
   [junit4]   2> 327536 INFO  
(OverseerStateUpdate-97117434597539855-127.0.0.1:36443_-n_0000000003) 
[n:127.0.0.1:36443_    ] o.a.s.c.Overseer Overseer Loop exiting : 
127.0.0.1:36443_
   [junit4]   2> 327539 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.AbstractConnector Stopped 
ServerConnector@14a5fdb{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
   [junit4]   2> 327539 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.e.j.s.h.ContextHandler Stopped 
o.e.j.s.ServletContextHandler@1a09e26{/,null,UNAVAILABLE}
   [junit4]   2> 327540 INFO  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.ZkTestServer connecting to 127.0.0.1:34370 34370
   [junit4]   2> 327657 INFO  (Thread-577) [    ] o.a.s.c.ZkTestServer 
connecting to 127.0.0.1:34370 34370
   [junit4]   2> 327658 WARN  (Thread-577) [    ] o.a.s.c.ZkTestServer Watch 
limit violations: 
   [junit4]   2> Maximum concurrent create/delete watches above limit:
   [junit4]   2> 
   [junit4]   2>        5       /solr/aliases.json
   [junit4]   2>        5       /solr/clusterprops.json
   [junit4]   2>        4       /solr/security.json
   [junit4]   2>        4       /solr/configs/conf1
   [junit4]   2> 
   [junit4]   2> Maximum concurrent data watches above limit:
   [junit4]   2> 
   [junit4]   2>        5       /solr/clusterstate.json
   [junit4]   2>        3       /solr/collections/c8n_1x3_lf/state.json
   [junit4]   2>        3       /solr/collections/collection1/state.json
   [junit4]   2>        2       
/solr/overseer_elect/election/97117434597539844-127.0.0.1:41352_-n_0000000000
   [junit4]   2> 
   [junit4]   2> Maximum concurrent children watches above limit:
   [junit4]   2> 
   [junit4]   2>        113     /solr/overseer/collection-queue-work
   [junit4]   2>        39      /solr/overseer/queue
   [junit4]   2>        23      /solr/overseer/queue-work
   [junit4]   2>        5       /solr/live_nodes
   [junit4]   2>        5       /solr/collections
   [junit4]   2> 
   [junit4]   2> 327658 WARN  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.SocketProxy Closing 0 connections to: http://127.0.0.1:37726/, target: 
http://127.0.0.1:34353/
   [junit4]   2> 327658 WARN  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.SocketProxy Closing 1 connections to: http://127.0.0.1:36443/, target: 
http://127.0.0.1:38291/
   [junit4]   2> 327658 WARN  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.SocketProxy Closing 12 connections to: http://127.0.0.1:41352/, target: 
http://127.0.0.1:44927/
   [junit4]   2> 327658 WARN  
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[4252C772B8A8F31F]) [    ] 
o.a.s.c.SocketProxy Closing 3 connections to: http://127.0.0.1:42767/, target: 
http://127.0.0.1:36082/
   [junit4]   2> NOTE: reproduce with: ant test  
-Dtestcase=LeaderFailoverAfterPartitionTest -Dtests.method=test 
-Dtests.seed=4252C772B8A8F31F -Dtests.multiplier=3 -Dtests.slow=true 
-Dtests.locale=de-LU -Dtests.timezone=Asia/Dili -Dtests.asserts=true 
-Dtests.file.encoding=ISO-8859-1
   [junit4] FAILURE  115s J2 | LeaderFailoverAfterPartitionTest.test <<<
   [junit4]    > Throwable #1: java.lang.AssertionError: Expected 2 of 3 
replicas to be active but only found 1; 
[core_node2:{"core":"c8n_1x3_lf_shard1_replica3","base_url":"http://127.0.0.1:41352","node_name":"127.0.0.1:41352_","state":"active","leader":"true"}];
 clusterState: DocCollection(c8n_1x3_lf//collections/c8n_1x3_lf/state.json/17)={
   [junit4]    >   "replicationFactor":"3",
   [junit4]    >   "shards":{"shard1":{
   [junit4]    >       "range":"80000000-7fffffff",
   [junit4]    >       "state":"active",
   [junit4]    >       "replicas":{
   [junit4]    >         "core_node1":{
   [junit4]    >           "state":"down",
   [junit4]    >           "base_url":"http://127.0.0.1:36443",
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica1",
   [junit4]    >           "node_name":"127.0.0.1:36443_"},
   [junit4]    >         "core_node2":{
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica3",
   [junit4]    >           "base_url":"http://127.0.0.1:41352",
   [junit4]    >           "node_name":"127.0.0.1:41352_",
   [junit4]    >           "state":"active",
   [junit4]    >           "leader":"true"},
   [junit4]    >         "core_node3":{
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica2",
   [junit4]    >           "base_url":"http://127.0.0.1:37726",
   [junit4]    >           "node_name":"127.0.0.1:37726_",
   [junit4]    >           "state":"down"}}}},
   [junit4]    >   "router":{"name":"compositeId"},
   [junit4]    >   "maxShardsPerNode":"1",
   [junit4]    >   "autoAddReplicas":"false"}
   [junit4]    >        at 
__randomizedtesting.SeedInfo.seed([4252C772B8A8F31F:CA06F8A816549EE7]:0)
   [junit4]    >        at 
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:168)
   [junit4]    >        at 
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:55)
   [junit4]    >        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:992)
   [junit4]    >        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:967)
   [junit4]    >        at java.lang.Thread.run(Thread.java:745)
   [junit4]   2> 327662 INFO  
(SUITE-LeaderFailoverAfterPartitionTest-seed#[4252C772B8A8F31F]-worker) [    ] 
o.a.s.SolrTestCaseJ4 ###deleteCore
   [junit4]   2> NOTE: leaving temporary files on disk at: 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_4252C772B8A8F31F-001
   [junit4]   2> Dec 16, 2016 1:22:43 PM 
com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
   [junit4]   2> WARNING: Will linger awaiting termination of 1 leaked 
thread(s).
   [junit4]   2> NOTE: test params are: codec=Asserting(Lucene62): 
{range_facet_l_dv=PostingsFormat(name=Asserting), 
_version_=Lucene50(blocksize=128), multiDefault=FST50, 
a_t=Lucene50(blocksize=128), intDefault=Lucene50(blocksize=128), 
id=PostingsFormat(name=Asserting), range_facet_i_dv=Lucene50(blocksize=128), 
text=PostingsFormat(name=LuceneVarGapFixedInterval), 
range_facet_l=Lucene50(blocksize=128), timestamp=Lucene50(blocksize=128)}, 
docValues:{range_facet_l_dv=DocValuesFormat(name=Asserting), 
range_facet_i_dv=DocValuesFormat(name=Memory), 
timestamp=DocValuesFormat(name=Memory)}, maxPointsInLeafNode=1950, 
maxMBSortInHeap=5.105494123369168, 
sim=RandomSimilarity(queryNorm=false,coord=no): {}, locale=de-LU, 
timezone=Asia/Dili
   [junit4]   2> NOTE: Linux 4.4.0-53-generic i386/Oracle Corporation 1.8.0_112 
(32-bit)/cpus=12,threads=1,free=47002208,total=154370048
   [junit4]   2> NOTE: All tests run in this JVM: [TestConfigSets, 
HdfsDirectoryFactoryTest, TestReloadDeadlock, PrimitiveFieldTypeTest, 
LeaderElectionTest, CircularListTest, SOLR749Test, 
LeaderInitiatedRecoveryOnShardRestartTest, PluginInfoTest, AtomicUpdatesTest, 
TestBulkSchemaConcurrent, TestMiniSolrCloudCluster, PrimUtilsTest, 
TestCollationField, TestShortCircuitedRequests, TestNumericTerms64, 
QueryParsingTest, RemoteQueryErrorTest, 
StatelessScriptUpdateProcessorFactoryTest, TestDynamicFieldResource, 
ConjunctionSolrSpellCheckerTest, TestSolrQueryParser, 
ReplicaListTransformerTest, TestLRUStatsCache, JavabinLoaderTest, 
TestSmileRequest, TestSort, JSONWriterTest, LeaderFailoverAfterPartitionTest]
   [junit4] Completed [145/662 (1!)] on J2 in 115.34s, 1 test, 1 failure <<< 
FAILURES!

[...truncated 56259 lines...]

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to