Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/20355/
Java: 32bit/jdk1.8.0_144 -server -XX:+UseParallelGC

1 test failed.
FAILED:  org.apache.solr.cloud.HttpPartitionTest.test

Error Message:
Doc with id=1 not found in http://127.0.0.1:36769/m_w/collMinRf_1x3 due to: Path not found: /id; rsp={doc=null}

Stack Trace:
java.lang.AssertionError: Doc with id=1 not found in http://127.0.0.1:36769/m_w/collMinRf_1x3 due to: Path not found: /id; rsp={doc=null}
        at __randomizedtesting.SeedInfo.seed([D31F5E8D9AE0F05:8565CA32775262FD]:0)
        at org.junit.Assert.fail(Assert.java:93)
        at org.junit.Assert.assertTrue(Assert.java:43)
        at org.apache.solr.cloud.HttpPartitionTest.assertDocExists(HttpPartitionTest.java:603)
        at org.apache.solr.cloud.HttpPartitionTest.assertDocsExistInAllReplicas(HttpPartitionTest.java:558)
        at org.apache.solr.cloud.HttpPartitionTest.testMinRf(HttpPartitionTest.java:249)
        at org.apache.solr.cloud.HttpPartitionTest.test(HttpPartitionTest.java:127)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957)
        at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993)
        at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at java.lang.Thread.run(Thread.java:748)
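
For reference, the failing check is HttpPartitionTest.assertDocExists (HttpPartitionTest.java:603 above), and the response shape in the message (rsp={doc=null}) indicates a real-time get lookup that returned no document from the replica at http://127.0.0.1:36769/m_w/collMinRf_1x3. Below is a minimal standalone SolrJ sketch of that kind of per-replica check; it is not the test's exact code, and the class name and the hard-coded replica URL (copied from the failure message) are illustrative assumptions.

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

// Hypothetical reproduction sketch: ask one replica for id=1 via the
// real-time get handler and report whether it returns a document or doc=null.
public class RealTimeGetCheck {
  public static void main(String[] args) throws Exception {
    // Replica core URL taken from the failure message; adjust for a live cluster.
    String replicaUrl = "http://127.0.0.1:36769/m_w/collMinRf_1x3";
    try (HttpSolrClient client = new HttpSolrClient.Builder(replicaUrl).build()) {
      SolrQuery q = new SolrQuery();
      q.setRequestHandler("/get"); // real-time get, so uncommitted updates are visible
      q.set("id", "1");            // the document the test could not find
      q.set("distrib", "false");   // query only this replica, no fan-out
      QueryResponse rsp = client.query(q);
      Object doc = rsp.getResponse().get("doc");
      // The failing run saw rsp={doc=null}, i.e. this replica had no copy of id=1.
      System.out.println(doc == null ? "doc=null (id=1 missing on this replica)" : doc.toString());
    }
  }
}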




Build Log:
[...truncated 10969 lines...]
   [junit4] Suite: org.apache.solr.cloud.HttpPartitionTest
   [junit4]   2> Creating dataDir: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/init-core-data-001
   [junit4]   2> 202544 WARN  
(SUITE-HttpPartitionTest-seed#[D31F5E8D9AE0F05]-worker) [    ] 
o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=2 numCloses=2
   [junit4]   2> 202544 INFO  
(SUITE-HttpPartitionTest-seed#[D31F5E8D9AE0F05]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Using TrieFields (NUMERIC_POINTS_SYSPROP=false) 
w/NUMERIC_DOCVALUES_SYSPROP=false
   [junit4]   2> 202545 INFO  
(SUITE-HttpPartitionTest-seed#[D31F5E8D9AE0F05]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: 
@org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776)
   [junit4]   2> 202545 INFO  
(SUITE-HttpPartitionTest-seed#[D31F5E8D9AE0F05]-worker) [    ] 
o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: 
test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
   [junit4]   2> 202545 INFO  
(SUITE-HttpPartitionTest-seed#[D31F5E8D9AE0F05]-worker) [    ] 
o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /m_w/
   [junit4]   2> 202547 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 202548 INFO  (Thread-537) [    ] o.a.s.c.ZkTestServer client 
port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 202548 INFO  (Thread-537) [    ] o.a.s.c.ZkTestServer Starting 
server
   [junit4]   2> 202549 ERROR (Thread-537) [    ] o.a.z.s.ZooKeeperServer 
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action 
on ERROR or SHUTDOWN server state changes
   [junit4]   2> 202648 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.ZkTestServer start zk server on port:44917
   [junit4]   2> 202653 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
 to /configs/conf1/solrconfig.xml
   [junit4]   2> 202654 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/schema.xml
 to /configs/conf1/schema.xml
   [junit4]   2> 202654 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
 to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 202655 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/stopwords.txt
 to /configs/conf1/stopwords.txt
   [junit4]   2> 202655 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/protwords.txt
 to /configs/conf1/protwords.txt
   [junit4]   2> 202656 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/currency.xml
 to /configs/conf1/currency.xml
   [junit4]   2> 202656 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml
 to /configs/conf1/enumsConfig.xml
   [junit4]   2> 202656 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json
 to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 202657 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt
 to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 202657 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt
 to /configs/conf1/old_synonyms.txt
   [junit4]   2> 202657 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
 to /configs/conf1/synonyms.txt
   [junit4]   2> 202658 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase Will use TLOG replicas unless explicitly 
asked otherwise
   [junit4]   2> 202717 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.e.j.s.Server 
jetty-9.3.20.v20170531
   [junit4]   2> 202717 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@28d244{/m_w,null,AVAILABLE}
   [junit4]   2> 202718 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@201e44{HTTP/1.1,[http/1.1]}{127.0.0.1:36377}
   [junit4]   2> 202718 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.e.j.s.Server 
Started @204274ms
   [junit4]   2> 202718 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/tempDir-001/control/data,
 hostContext=/m_w, hostPort=33981, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/control-001/cores}
   [junit4]   2> 202718 ERROR 
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 202718 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
8.0.0
   [junit4]   2> 202718 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 202718 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null, Default config 
dir: null
   [junit4]   2> 202718 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-08-21T18:54:14.806Z
   [junit4]   2> 202720 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 202720 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/control-001/solr.xml
   [junit4]   2> 202723 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.SolrXmlConfig MBean server found: 
com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b, but no JMX reporters were 
configured - adding default JMX reporter.
   [junit4]   2> 202726 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.a.s.c.ZkContainer 
Zookeeper client=127.0.0.1:44917/solr
   [junit4]   2> 202752 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:33981_m_w    
] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 202752 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:33981_m_w    
] o.a.s.c.OverseerElectionContext I am going to be the leader 
127.0.0.1:33981_m_w
   [junit4]   2> 202752 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:33981_m_w    
] o.a.s.c.Overseer Overseer 
(id=98522998678224900-127.0.0.1:33981_m_w-n_0000000000) starting
   [junit4]   2> 202755 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:33981_m_w    
] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:33981_m_w
   [junit4]   2> 202755 INFO  
(zkCallback-311-thread-1-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (1)
   [junit4]   2> 202868 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:33981_m_w    
] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 
'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 202874 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:33981_m_w    
] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') 
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 202874 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:33981_m_w    
] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 
'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 202875 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:33981_m_w    
] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/control-001/cores
   [junit4]   2> 202889 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 202889 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:44917/solr ready
   [junit4]   2> 202890 INFO  (SocketProxy-Acceptor-33981) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=59266,localport=33981], receiveBufferSize:531000
   [junit4]   2> 202901 INFO  (SocketProxy-Acceptor-33981) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=36377,localport=53550], receiveBufferSize=530904
   [junit4]   2> 202901 INFO  (qtp21158870-2056) [n:127.0.0.1:33981_m_w    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params 
replicationFactor=1&collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:33981_m_w&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 202902 INFO  
(OverseerThreadFactory-1123-thread-1-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.CreateCollectionCmd Create collection 
control_collection
   [junit4]   2> 203006 INFO  (SocketProxy-Acceptor-33981) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=59270,localport=33981], receiveBufferSize:531000
   [junit4]   2> 203008 INFO  (SocketProxy-Acceptor-33981) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=36377,localport=53554], receiveBufferSize=530904
   [junit4]   2> 203008 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w    ] 
o.a.s.h.a.CoreAdminOperation core create command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 203008 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w    ] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 203111 INFO  
(zkCallback-311-thread-1-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/control_collection/state.json] for collection 
[control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 204025 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 204039 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.IndexSchema [control_collection_shard1_replica_n1] Schema name=test
   [junit4]   2> 204090 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 204097 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.CoreContainer Creating SolrCore 'control_collection_shard1_replica_n1' 
using configuration from collection control_collection, trusted=true
   [junit4]   2> 204097 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.core.control_collection.shard1.replica_n1' (registry 
'solr.core.control_collection.shard1.replica_n1') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 204097 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
   [junit4]   2> 204097 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SolrCore [[control_collection_shard1_replica_n1] ] Opening new SolrCore 
at 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/control-001/cores/control_collection_shard1_replica_n1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/control-001/cores/control_collection_shard1_replica_n1/data/]
   [junit4]   2> 204099 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=27, maxMergeAtOnceExplicit=33, maxMergedSegmentMB=14.224609375, 
floorSegmentMB=0.203125, forceMergeDeletesPctAllowed=29.913688797449645, 
segmentsPerTier=36.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
   [junit4]   2> 204101 WARN  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 204126 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 204126 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH 
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 204127 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 204127 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 204128 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: 
minMergeSize=1000, mergeFactor=22, maxMergeSize=9223372036854775807, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.7435781152372346]
   [junit4]   2> 204128 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.SolrIndexSearcher Opening 
[Searcher@1706190[control_collection_shard1_replica_n1] main]
   [junit4]   2> 204129 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 204129 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 204129 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 204130 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.UpdateLog Could not find max version in index or recent updates, using 
new clock 1576367980510445568
   [junit4]   2> 204130 INFO  
(searcherExecutor-1126-thread-1-processing-n:127.0.0.1:33981_m_w 
x:control_collection_shard1_replica_n1 s:shard1 c:control_collection) 
[n:127.0.0.1:33981_m_w c:control_collection s:shard1  
x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore 
[control_collection_shard1_replica_n1] Registered new searcher 
Searcher@1706190[control_collection_shard1_replica_n1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 204132 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 204132 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 204132 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SyncStrategy Sync replicas to 
http://127.0.0.1:33981/m_w/control_collection_shard1_replica_n1/
   [junit4]   2> 204132 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 204133 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SyncStrategy 
http://127.0.0.1:33981/m_w/control_collection_shard1_replica_n1/ has no replicas
   [junit4]   2> 204133 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ShardLeaderElectionContext Found all replicas participating in 
election, clear LIR
   [junit4]   2> 204134 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ShardLeaderElectionContext I am the new leader: 
http://127.0.0.1:33981/m_w/control_collection_shard1_replica_n1/ shard1
   [junit4]   2> 204235 INFO  
(zkCallback-311-thread-1-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/control_collection/state.json] for collection 
[control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 204284 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 204286 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores 
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT}
 status=0 QTime=1277
   [junit4]   2> 204289 INFO  (qtp21158870-2056) [n:127.0.0.1:33981_m_w    ] 
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 
30 seconds. Check all shard replicas
   [junit4]   2> 204389 INFO  
(zkCallback-311-thread-1-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/control_collection/state.json] for collection 
[control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 204904 INFO  
(OverseerCollectionConfigSetProcessor-98522998678224900-127.0.0.1:33981_m_w-n_0000000000)
 [n:127.0.0.1:33981_m_w    ] o.a.s.c.OverseerTaskQueue Response ZK path: 
/overseer/collection-queue-work/qnr-0000000000 doesn't exist.  Requestor may 
have disconnected from ZooKeeper
   [junit4]   2> 205289 INFO  (qtp21158870-2056) [n:127.0.0.1:33981_m_w    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections 
params={replicationFactor=1&collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:33981_m_w&wt=javabin&version=2}
 status=0 QTime=2387
   [junit4]   2> 205294 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 205294 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:44917/solr ready
   [junit4]   2> 205294 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.a.s.c.ChaosMonkey 
monkey: init - expire sessions:false cause connection loss:false
   [junit4]   2> 205295 INFO  (SocketProxy-Acceptor-33981) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=59290,localport=33981], receiveBufferSize:531000
   [junit4]   2> 205298 INFO  (SocketProxy-Acceptor-33981) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=36377,localport=53574], receiveBufferSize=530904
   [junit4]   2> 205299 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params 
replicationFactor=1&collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=2&createNodeSet=&stateFormat=2&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 205300 INFO  
(OverseerThreadFactory-1123-thread-2-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.CreateCollectionCmd Create collection 
collection1
   [junit4]   2> 205300 WARN  
(OverseerThreadFactory-1123-thread-2-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.CreateCollectionCmd It is unusual to create 
a collection (collection1) without cores.
   [junit4]   2> 205502 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w    ] 
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 
30 seconds. Check all shard replicas
   [junit4]   2> 205502 INFO  (qtp21158870-2058) [n:127.0.0.1:33981_m_w    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections 
params={replicationFactor=1&collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=2&createNodeSet=&stateFormat=2&wt=javabin&version=2}
 status=0 QTime=203
   [junit4]   2> 205562 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-1-001
 of type TLOG
   [junit4]   2> 205562 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.e.j.s.Server 
jetty-9.3.20.v20170531
   [junit4]   2> 205563 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@6818db{/m_w,null,AVAILABLE}
   [junit4]   2> 205563 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@e60df0{HTTP/1.1,[http/1.1]}{127.0.0.1:35543}
   [junit4]   2> 205563 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.e.j.s.Server 
Started @207120ms
   [junit4]   2> 205563 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/tempDir-001/jetty1,
 replicaType=TLOG, solrconfig=solrconfig.xml, hostContext=/m_w, hostPort=36769, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-1-001/cores}
   [junit4]   2> 205564 ERROR 
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 205564 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
8.0.0
   [junit4]   2> 205564 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 205564 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null, Default config 
dir: null
   [junit4]   2> 205564 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-08-21T18:54:17.652Z
   [junit4]   2> 205566 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 205566 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-1-001/solr.xml
   [junit4]   2> 205568 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.SolrXmlConfig MBean server found: 
com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b, but no JMX reporters were 
configured - adding default JMX reporter.
   [junit4]   2> 205570 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.a.s.c.ZkContainer 
Zookeeper client=127.0.0.1:44917/solr
   [junit4]   2> 205574 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:36769_m_w    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 205574 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:36769_m_w    
] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 205575 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:36769_m_w    
] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:36769_m_w
   [junit4]   2> 205576 INFO  (zkCallback-318-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 205576 INFO  
(zkCallback-311-thread-1-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (1) -> (2)
   [junit4]   2> 205576 INFO  
(zkCallback-323-thread-1-processing-n:127.0.0.1:36769_m_w) 
[n:127.0.0.1:36769_m_w    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (1) -> (2)
   [junit4]   2> 205619 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:36769_m_w    
] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 
'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 205626 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:36769_m_w    
] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') 
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 205626 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:36769_m_w    
] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 
'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 205628 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:36769_m_w    
] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-1-001/cores
   [junit4]   2> 205648 INFO  (SocketProxy-Acceptor-36769) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=38394,localport=36769], receiveBufferSize:531000
   [junit4]   2> 205648 INFO  (SocketProxy-Acceptor-36769) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=35543,localport=50634], receiveBufferSize=530904
   [junit4]   2> 205649 INFO  (qtp29713451-2104) [n:127.0.0.1:36769_m_w    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params 
node=127.0.0.1:36769_m_w&action=ADDREPLICA&collection=collection1&shard=shard2&type=TLOG&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 205653 INFO  
(OverseerCollectionConfigSetProcessor-98522998678224900-127.0.0.1:33981_m_w-n_0000000000)
 [n:127.0.0.1:33981_m_w    ] o.a.s.c.OverseerTaskQueue Response ZK path: 
/overseer/collection-queue-work/qnr-0000000002 doesn't exist.  Requestor may 
have disconnected from ZooKeeper
   [junit4]   2> 205653 INFO  
(OverseerThreadFactory-1123-thread-3-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.AddReplicaCmd Node Identified 
127.0.0.1:36769_m_w for creating new replica
   [junit4]   2> 205654 INFO  (SocketProxy-Acceptor-36769) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=38398,localport=36769], receiveBufferSize:531000
   [junit4]   2> 205654 INFO  (SocketProxy-Acceptor-36769) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=35543,localport=50638], receiveBufferSize=530904
   [junit4]   2> 205655 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w    ] 
o.a.s.h.a.CoreAdminOperation core create command 
qt=/admin/cores&collection.configName=conf1&name=collection1_shard2_replica_t41&action=CREATE&collection=collection1&shard=shard2&wt=javabin&version=2&replicaType=TLOG
   [junit4]   2> 205655 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w    ] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 205761 INFO  
(zkCallback-323-thread-1-processing-n:127.0.0.1:36769_m_w) 
[n:127.0.0.1:36769_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [2])
   [junit4]   2> 206663 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 206675 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.s.IndexSchema 
[collection1_shard2_replica_t41] Schema name=test
   [junit4]   2> 206724 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.s.IndexSchema 
Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 206731 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1_shard2_replica_t41' using configuration from 
collection collection1, trusted=true
   [junit4]   2> 206732 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.core.collection1.shard2.replica_t41' (registry 
'solr.core.collection1.shard2.replica_t41') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 206732 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SolrCore 
solr.RecoveryStrategy.Builder
   [junit4]   2> 206732 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SolrCore 
[[collection1_shard2_replica_t41] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-1-001/cores/collection1_shard2_replica_t41],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-1-001/cores/collection1_shard2_replica_t41/data/]
   [junit4]   2> 206734 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=27, maxMergeAtOnceExplicit=33, maxMergedSegmentMB=14.224609375, 
floorSegmentMB=0.203125, forceMergeDeletesPctAllowed=29.913688797449645, 
segmentsPerTier=36.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
   [junit4]   2> 206735 WARN  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 206760 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.u.UpdateHandler 
Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 206760 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 206761 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.u.CommitTracker 
Hard AutoCommit: disabled
   [junit4]   2> 206761 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.u.CommitTracker 
Soft AutoCommit: disabled
   [junit4]   2> 206761 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: 
minMergeSize=1000, mergeFactor=22, maxMergeSize=9223372036854775807, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.7435781152372346]
   [junit4]   2> 206762 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] 
o.a.s.s.SolrIndexSearcher Opening 
[Searcher@925c21[collection1_shard2_replica_t41] main]
   [junit4]   2> 206763 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 206763 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 206763 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 206764 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.u.UpdateLog 
Could not find max version in index or recent updates, using new clock 
1576367983272394752
   [junit4]   2> 206764 INFO  
(searcherExecutor-1137-thread-1-processing-n:127.0.0.1:36769_m_w 
x:collection1_shard2_replica_t41 s:shard2 c:collection1) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SolrCore 
[collection1_shard2_replica_t41] Registered new searcher 
Searcher@925c21[collection1_shard2_replica_t41] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 206767 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] 
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 206767 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] 
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 206767 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SyncStrategy 
Sync replicas to http://127.0.0.1:36769/m_w/collection1_shard2_replica_t41/
   [junit4]   2> 206767 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SyncStrategy 
Sync Success - now sync replicas to me
   [junit4]   2> 206767 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SyncStrategy 
http://127.0.0.1:36769/m_w/collection1_shard2_replica_t41/ has no replicas
   [junit4]   2> 206767 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] 
o.a.s.c.ShardLeaderElectionContext Found all replicas participating in 
election, clear LIR
   [junit4]   2> 206767 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.ZkController 
collection1_shard2_replica_t41 stopping background replication from leader
   [junit4]   2> 206769 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] 
o.a.s.c.ShardLeaderElectionContext I am the new leader: 
http://127.0.0.1:36769/m_w/collection1_shard2_replica_t41/ shard2
   [junit4]   2> 206870 INFO  
(zkCallback-323-thread-1-processing-n:127.0.0.1:36769_m_w) 
[n:127.0.0.1:36769_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [2])
   [junit4]   2> 206919 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.ZkController 
I am the leader, no recovery necessary
   [junit4]   2> 206921 INFO  (qtp29713451-2106) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.s.HttpSolrCall 
[admin] webapp=null path=/admin/cores 
params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard2_replica_t41&action=CREATE&collection=collection1&shard=shard2&wt=javabin&version=2&replicaType=TLOG}
 status=0 QTime=1266
   [junit4]   2> 206924 INFO  (qtp29713451-2104) [n:127.0.0.1:36769_m_w    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections 
params={node=127.0.0.1:36769_m_w&action=ADDREPLICA&collection=collection1&shard=shard2&type=TLOG&wt=javabin&version=2}
 status=0 QTime=1274
   [junit4]   2> 207006 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-2-001
 of type TLOG
   [junit4]   2> 207006 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.e.j.s.Server 
jetty-9.3.20.v20170531
   [junit4]   2> 207007 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@460247{/m_w,null,AVAILABLE}
   [junit4]   2> 207007 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@e24e10{HTTP/1.1,[http/1.1]}{127.0.0.1:45365}
   [junit4]   2> 207008 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.e.j.s.Server 
Started @208564ms
   [junit4]   2> 207008 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/tempDir-001/jetty2,
 replicaType=TLOG, solrconfig=solrconfig.xml, hostContext=/m_w, hostPort=46785, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-2-001/cores}
   [junit4]   2> 207008 ERROR 
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 207008 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
8.0.0
   [junit4]   2> 207008 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 207008 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null, Default config 
dir: null
   [junit4]   2> 207008 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-08-21T18:54:19.096Z
   [junit4]   2> 207010 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 207011 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-2-001/solr.xml
   [junit4]   2> 207013 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.SolrXmlConfig MBean server found: 
com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b, but no JMX reporters were 
configured - adding default JMX reporter.
   [junit4]   2> 207015 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.a.s.c.ZkContainer 
Zookeeper client=127.0.0.1:44917/solr
   [junit4]   2> 207020 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:46785_m_w    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 207021 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:46785_m_w    
] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 207021 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:46785_m_w    
] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:46785_m_w
   [junit4]   2> 207022 INFO  (zkCallback-318-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 207022 INFO  
(zkCallback-311-thread-2-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (2) -> (3)
   [junit4]   2> 207022 INFO  
(zkCallback-323-thread-1-processing-n:127.0.0.1:36769_m_w) 
[n:127.0.0.1:36769_m_w    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (2) -> (3)
   [junit4]   2> 207022 INFO  
(zkCallback-329-thread-1-processing-n:127.0.0.1:46785_m_w) 
[n:127.0.0.1:46785_m_w    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (2) -> (3)
   [junit4]   2> 207061 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:46785_m_w    
] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 
'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 207068 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:46785_m_w    
] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') 
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 207068 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:46785_m_w    
] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 
'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 207069 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:46785_m_w    
] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-2-001/cores
   [junit4]   2> 207089 INFO  (qtp29713451-2105) [n:127.0.0.1:36769_m_w    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params 
node=127.0.0.1:46785_m_w&action=ADDREPLICA&collection=collection1&shard=shard1&type=TLOG&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 207091 INFO  
(OverseerCollectionConfigSetProcessor-98522998678224900-127.0.0.1:33981_m_w-n_0000000000)
 [n:127.0.0.1:33981_m_w    ] o.a.s.c.OverseerTaskQueue Response ZK path: 
/overseer/collection-queue-work/qnr-0000000004 doesn't exist.  Requestor may 
have disconnected from ZooKeeper
   [junit4]   2> 207091 INFO  
(OverseerThreadFactory-1123-thread-4-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.AddReplicaCmd Node Identified 
127.0.0.1:46785_m_w for creating new replica
   [junit4]   2> 207092 INFO  (SocketProxy-Acceptor-46785) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=34198,localport=46785], receiveBufferSize:531000
   [junit4]   2> 207092 INFO  (SocketProxy-Acceptor-46785) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=45365,localport=34872], receiveBufferSize=530904
   [junit4]   2> 207093 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w    ] 
o.a.s.h.a.CoreAdminOperation core create command 
qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_t43&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=TLOG
   [junit4]   2> 207093 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w    ] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 207196 INFO  
(zkCallback-323-thread-1-processing-n:127.0.0.1:36769_m_w) 
[n:127.0.0.1:36769_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 207196 INFO  
(zkCallback-329-thread-1-processing-n:127.0.0.1:46785_m_w) 
[n:127.0.0.1:46785_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 208103 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 208112 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.s.IndexSchema 
[collection1_shard1_replica_t43] Schema name=test
   [junit4]   2> 208165 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.s.IndexSchema 
Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 208173 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1_shard1_replica_t43' using configuration from 
collection collection1, trusted=true
   [junit4]   2> 208173 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.core.collection1.shard1.replica_t43' (registry 
'solr.core.collection1.shard1.replica_t43') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 208173 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SolrCore 
solr.RecoveryStrategy.Builder
   [junit4]   2> 208173 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SolrCore 
[[collection1_shard1_replica_t43] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-2-001/cores/collection1_shard1_replica_t43],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-2-001/cores/collection1_shard1_replica_t43/data/]
   [junit4]   2> 208176 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=27, maxMergeAtOnceExplicit=33, maxMergedSegmentMB=14.224609375, 
floorSegmentMB=0.203125, forceMergeDeletesPctAllowed=29.913688797449645, 
segmentsPerTier=36.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
   [junit4]   2> 208180 WARN  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 208221 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.u.UpdateHandler 
Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 208221 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 208222 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.u.CommitTracker 
Hard AutoCommit: disabled
   [junit4]   2> 208222 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.u.CommitTracker 
Soft AutoCommit: disabled
   [junit4]   2> 208223 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: 
minMergeSize=1000, mergeFactor=22, maxMergeSize=9223372036854775807, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.7435781152372346]
   [junit4]   2> 208223 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] 
o.a.s.s.SolrIndexSearcher Opening 
[Searcher@aeae8e[collection1_shard1_replica_t43] main]
   [junit4]   2> 208224 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 208224 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 208224 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 208225 INFO  
(searcherExecutor-1148-thread-1-processing-n:127.0.0.1:46785_m_w 
x:collection1_shard1_replica_t43 s:shard1 c:collection1) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SolrCore 
[collection1_shard1_replica_t43] Registered new searcher 
Searcher@aeae8e[collection1_shard1_replica_t43] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 208225 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.u.UpdateLog 
Could not find max version in index or recent updates, using new clock 
1576367984804364288
   [junit4]   2> 208228 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] 
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 208228 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] 
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 208228 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SyncStrategy 
Sync replicas to http://127.0.0.1:46785/m_w/collection1_shard1_replica_t43/
   [junit4]   2> 208228 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SyncStrategy 
Sync Success - now sync replicas to me
   [junit4]   2> 208228 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SyncStrategy 
http://127.0.0.1:46785/m_w/collection1_shard1_replica_t43/ has no replicas
   [junit4]   2> 208228 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] 
o.a.s.c.ShardLeaderElectionContext Found all replicas participating in 
election, clear LIR
   [junit4]   2> 208228 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.ZkController 
collection1_shard1_replica_t43 stopping background replication from leader
   [junit4]   2> 208229 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] 
o.a.s.c.ShardLeaderElectionContext I am the new leader: 
http://127.0.0.1:46785/m_w/collection1_shard1_replica_t43/ shard1
   [junit4]   2> 208331 INFO  
(zkCallback-329-thread-1-processing-n:127.0.0.1:46785_m_w) 
[n:127.0.0.1:46785_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 208331 INFO  
(zkCallback-323-thread-1-processing-n:127.0.0.1:36769_m_w) 
[n:127.0.0.1:36769_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 208380 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.ZkController 
I am the leader, no recovery necessary
   [junit4]   2> 208382 INFO  (qtp26268127-2139) [n:127.0.0.1:46785_m_w 
c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.s.HttpSolrCall 
[admin] webapp=null path=/admin/cores 
params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_t43&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=TLOG}
 status=0 QTime=1288
   [junit4]   2> 208384 INFO  (qtp29713451-2105) [n:127.0.0.1:36769_m_w    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections 
params={node=127.0.0.1:46785_m_w&action=ADDREPLICA&collection=collection1&shard=shard1&type=TLOG&wt=javabin&version=2}
 status=0 QTime=1294
   [junit4]   2> 208464 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-3-001
 of type TLOG
   [junit4]   2> 208465 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.e.j.s.Server 
jetty-9.3.20.v20170531
   [junit4]   2> 208465 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@a6f516{/m_w,null,AVAILABLE}
   [junit4]   2> 208466 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@f9c99d{HTTP/1.1,[http/1.1]}{127.0.0.1:40925}
   [junit4]   2> 208466 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.e.j.s.Server 
Started @210023ms
   [junit4]   2> 208466 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/tempDir-001/jetty3,
 replicaType=TLOG, solrconfig=solrconfig.xml, hostContext=/m_w, hostPort=37479, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-3-001/cores}
   [junit4]   2> 208466 ERROR 
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 208466 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
8.0.0
   [junit4]   2> 208466 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 208466 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null, Default config 
dir: null
   [junit4]   2> 208466 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-08-21T18:54:20.554Z
   [junit4]   2> 208468 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 208469 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-3-001/solr.xml
   [junit4]   2> 208471 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.SolrXmlConfig MBean server found: 
com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b, but no JMX reporters were 
configured - adding default JMX reporter.
   [junit4]   2> 208475 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.a.s.c.ZkContainer 
Zookeeper client=127.0.0.1:44917/solr
   [junit4]   2> 208482 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:37479_m_w    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3)
   [junit4]   2> 208483 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:37479_m_w    
] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 208483 INFO  
(zkCallback-329-thread-1-processing-n:127.0.0.1:46785_m_w) 
[n:127.0.0.1:46785_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 208483 INFO  
(zkCallback-323-thread-1-processing-n:127.0.0.1:36769_m_w) 
[n:127.0.0.1:36769_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 208485 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:37479_m_w    
] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:37479_m_w
   [junit4]   2> 208485 INFO  (zkCallback-318-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 208485 INFO  
(zkCallback-323-thread-1-processing-n:127.0.0.1:36769_m_w) 
[n:127.0.0.1:36769_m_w    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (3) -> (4)
   [junit4]   2> 208485 INFO  
(zkCallback-311-thread-2-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (3) -> (4)
   [junit4]   2> 208485 INFO  
(zkCallback-329-thread-1-processing-n:127.0.0.1:46785_m_w) 
[n:127.0.0.1:46785_m_w    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (3) -> (4)
   [junit4]   2> 208485 INFO  
(zkCallback-335-thread-1-processing-n:127.0.0.1:37479_m_w) 
[n:127.0.0.1:37479_m_w    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (3) -> (4)
   [junit4]   2> 208539 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:37479_m_w    
] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 
'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 208546 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:37479_m_w    
] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') 
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 208546 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:37479_m_w    
] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 
'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 208547 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [n:127.0.0.1:37479_m_w    
] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-3-001/cores
   [junit4]   2> 208575 INFO  (qtp29713451-2100) [n:127.0.0.1:36769_m_w    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params 
node=127.0.0.1:37479_m_w&action=ADDREPLICA&collection=collection1&shard=shard2&type=TLOG&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 208576 INFO  
(OverseerCollectionConfigSetProcessor-98522998678224900-127.0.0.1:33981_m_w-n_0000000000)
 [n:127.0.0.1:33981_m_w    ] o.a.s.c.OverseerTaskQueue Response ZK path: 
/overseer/collection-queue-work/qnr-0000000006 doesn't exist.  Requestor may 
have disconnected from ZooKeeper
   [junit4]   2> 208576 INFO  
(OverseerThreadFactory-1123-thread-5-processing-n:127.0.0.1:33981_m_w) 
[n:127.0.0.1:33981_m_w    ] o.a.s.c.AddReplicaCmd Node Identified 
127.0.0.1:37479_m_w for creating new replica
   [junit4]   2> 208577 INFO  (SocketProxy-Acceptor-37479) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=38586,localport=37479], receiveBufferSize:531000
   [junit4]   2> 208577 INFO  (SocketProxy-Acceptor-37479) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=40925,localport=56746], receiveBufferSize=530904
   [junit4]   2> 208578 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w    ] 
o.a.s.h.a.CoreAdminOperation core create command 
qt=/admin/cores&collection.configName=conf1&name=collection1_shard2_replica_t45&action=CREATE&collection=collection1&shard=shard2&wt=javabin&version=2&replicaType=TLOG
   [junit4]   2> 208579 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w    ] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 208681 INFO  
(zkCallback-329-thread-1-processing-n:127.0.0.1:46785_m_w) 
[n:127.0.0.1:46785_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 208681 INFO  
(zkCallback-323-thread-1-processing-n:127.0.0.1:36769_m_w) 
[n:127.0.0.1:36769_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 208681 INFO  
(zkCallback-335-thread-1-processing-n:127.0.0.1:37479_m_w) 
[n:127.0.0.1:37479_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 209592 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 209600 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.s.IndexSchema 
[collection1_shard2_replica_t45] Schema name=test
   [junit4]   2> 209652 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.s.IndexSchema 
Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 209659 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1_shard2_replica_t45' using configuration from 
collection collection1, trusted=true
   [junit4]   2> 209659 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.core.collection1.shard2.replica_t45' (registry 
'solr.core.collection1.shard2.replica_t45') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@1f2b25b
   [junit4]   2> 209660 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.SolrCore 
solr.RecoveryStrategy.Builder
   [junit4]   2> 209660 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.SolrCore 
[[collection1_shard2_replica_t45] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-3-001/cores/collection1_shard2_replica_t45],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001/shard-3-001/cores/collection1_shard2_replica_t45/data/]
   [junit4]   2> 209662 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=27, maxMergeAtOnceExplicit=33, maxMergedSegmentMB=14.224609375, 
floorSegmentMB=0.203125, forceMergeDeletesPctAllowed=29.913688797449645, 
segmentsPerTier=36.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
   [junit4]   2> 209664 WARN  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 209691 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.u.UpdateHandler 
Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 209691 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 209692 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.u.CommitTracker 
Hard AutoCommit: disabled
   [junit4]   2> 209692 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.u.CommitTracker 
Soft AutoCommit: disabled
   [junit4]   2> 209693 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: 
minMergeSize=1000, mergeFactor=22, maxMergeSize=9223372036854775807, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.7435781152372346]
   [junit4]   2> 209693 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] 
o.a.s.s.SolrIndexSearcher Opening 
[Searcher@1a387b7[collection1_shard2_replica_t45] main]
   [junit4]   2> 209694 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 209694 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 209694 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 209695 INFO  
(searcherExecutor-1159-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.SolrCore 
[collection1_shard2_replica_t45] Registered new searcher 
Searcher@1a387b7[collection1_shard2_replica_t45] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 209695 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.u.UpdateLog 
Could not find max version in index or recent updates, using new clock 
1576367986345771008
   [junit4]   2> 209698 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.ZkController 
Core needs to recover:collection1_shard2_replica_t45
   [junit4]   2> 209698 INFO  
(updateExecutor-332-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] 
o.a.s.u.DefaultSolrCoreState Running recovery
   [junit4]   2> 209698 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Starting recovery 
process. recoveringAfterStartup=true
   [junit4]   2> 209699 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy ###### 
startupVersions=[[]]
   [junit4]   2> 209699 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.c.ZkController 
collection1_shard2_replica_t45 stopping background replication from leader
   [junit4]   2> 209699 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Begin buffering 
updates. core=[collection1_shard2_replica_t45]
   [junit4]   2> 209699 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.u.UpdateLog Starting to buffer updates. 
FSUpdateLog{state=ACTIVE, tlog=null}
   [junit4]   2> 209699 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Publishing state of 
core [collection1_shard2_replica_t45] as recovering, leader is 
[http://127.0.0.1:36769/m_w/collection1_shard2_replica_t41/] and I am 
[http://127.0.0.1:37479/m_w/collection1_shard2_replica_t45/]
   [junit4]   2> 209700 INFO  (qtp15304468-2171) [n:127.0.0.1:37479_m_w 
c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.s.HttpSolrCall 
[admin] webapp=null path=/admin/cores 
params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard2_replica_t45&action=CREATE&collection=collection1&shard=shard2&wt=javabin&version=2&replicaType=TLOG}
 status=0 QTime=1121
   [junit4]   2> 209701 INFO  (qtp29713451-2100) [n:127.0.0.1:36769_m_w    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections 
params={node=127.0.0.1:37479_m_w&action=ADDREPLICA&collection=collection1&shard=shard2&type=TLOG&wt=javabin&version=2}
 status=0 QTime=1126
   [junit4]   2> 209702 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Sending prep 
recovery command to [http://127.0.0.1:36769/m_w]; [WaitForState: 
action=PREPRECOVERY&core=collection1_shard2_replica_t41&nodeName=127.0.0.1:37479_m_w&coreNodeName=core_node46&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
   [junit4]   2> 209702 INFO  (SocketProxy-Acceptor-36769) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=38444,localport=36769], receiveBufferSize:531000
   [junit4]   2> 209702 INFO  (SocketProxy-Acceptor-36769) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=35543,localport=50684], receiveBufferSize=530904
   [junit4]   2> 209709 INFO  (qtp29713451-2101) [n:127.0.0.1:36769_m_w    ] 
o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node46, state: 
recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true, 
maxTime: 183 s
   [junit4]   2> 209709 INFO  (qtp29713451-2101) [n:127.0.0.1:36769_m_w    ] 
o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, 
shard=shard2, thisCore=collection1_shard2_replica_t41, 
leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, 
currentState=down, localState=active, nodeName=127.0.0.1:37479_m_w, 
coreNodeName=core_node46, onlyIfActiveCheckResult=false, nodeProps: 
core_node46:{"core":"collection1_shard2_replica_t45","base_url":"http://127.0.0.1:37479/m_w","node_name":"127.0.0.1:37479_m_w","state":"down","type":"TLOG"}
   [junit4]   2> 209710 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 209710 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase Wait for recoveries to finish - wait 
30000 for each attempt
   [junit4]   2> 209710 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: 
collection1 failOnTimeout:true timeout (sec):30000
   [junit4]   2> 209802 INFO  
(zkCallback-329-thread-1-processing-n:127.0.0.1:46785_m_w) 
[n:127.0.0.1:46785_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 209802 INFO  
(zkCallback-335-thread-1-processing-n:127.0.0.1:37479_m_w) 
[n:127.0.0.1:37479_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 209802 INFO  
(zkCallback-323-thread-1-processing-n:127.0.0.1:36769_m_w) 
[n:127.0.0.1:36769_m_w    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 210578 INFO  
(OverseerCollectionConfigSetProcessor-98522998678224900-127.0.0.1:33981_m_w-n_0000000000)
 [n:127.0.0.1:33981_m_w    ] o.a.s.c.OverseerTaskQueue Response ZK path: 
/overseer/collection-queue-work/qnr-0000000008 doesn't exist.  Requestor may 
have disconnected from ZooKeeper
   [junit4]   2> 210709 INFO  (qtp29713451-2101) [n:127.0.0.1:36769_m_w    ] 
o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, 
shard=shard2, thisCore=collection1_shard2_replica_t41, 
leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, 
currentState=recovering, localState=active, nodeName=127.0.0.1:37479_m_w, 
coreNodeName=core_node46, onlyIfActiveCheckResult=false, nodeProps: 
core_node46:{"core":"collection1_shard2_replica_t45","base_url":"http://127.0.0.1:37479/m_w","node_name":"127.0.0.1:37479_m_w","state":"recovering","type":"TLOG"}
   [junit4]   2> 210710 INFO  (qtp29713451-2101) [n:127.0.0.1:36769_m_w    ] 
o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node46, state: recovering, 
checkLive: true, onlyIfLeader: true for: 1 seconds.
   [junit4]   2> 210710 INFO  (qtp29713451-2101) [n:127.0.0.1:36769_m_w    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores 
params={nodeName=127.0.0.1:37479_m_w&onlyIfLeaderActive=true&core=collection1_shard2_replica_t41&coreNodeName=core_node46&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
 status=0 QTime=1000
   [junit4]   2> 211211 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Starting Replication 
Recovery.
   [junit4]   2> 211211 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Attempting to 
replicate from [http://127.0.0.1:36769/m_w/collection1_shard2_replica_t41/].
   [junit4]   2> 211212 INFO  (SocketProxy-Acceptor-36769) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=38458,localport=36769], receiveBufferSize:531000
   [junit4]   2> 211213 INFO  (SocketProxy-Acceptor-36769) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=35543,localport=50698], receiveBufferSize=530904
   [junit4]   2> 211217 INFO  (qtp29713451-2101) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] 
o.a.s.u.DirectUpdateHandler2 start 
commit{_version_=1576367987941703680,optimize=false,openSearcher=false,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 211217 INFO  (qtp29713451-2101) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] 
o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 211218 INFO  (qtp29713451-2101) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] 
o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 211218 INFO  (qtp29713451-2101) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1_shard2_replica_t41]  
webapp=/m_w path=/update 
params={waitSearcher=true&openSearcher=false&commit=true&softCommit=false&commit_end_point=true&wt=javabin&version=2}{commit=}
 0 1
   [junit4]   2> 211219 INFO  (SocketProxy-Acceptor-36769) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=38462,localport=36769], receiveBufferSize:531000
   [junit4]   2> 211220 INFO  (SocketProxy-Acceptor-36769) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=35543,localport=50702], receiveBufferSize=530904
   [junit4]   2> 211221 INFO  (qtp29713451-2099) [n:127.0.0.1:36769_m_w 
c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] 
o.a.s.c.S.Request [collection1_shard2_replica_t41]  webapp=/m_w 
path=/replication 
params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 
QTime=0
   [junit4]   2> 211222 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.h.IndexFetcher Master's generation: 1
   [junit4]   2> 211222 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.h.IndexFetcher Master's version: 0
   [junit4]   2> 211222 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.h.IndexFetcher Slave's generation: 1
   [junit4]   2> 211222 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.h.IndexFetcher Slave's version: 0
   [junit4]   2> 211222 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Replication Recovery 
was successful.
   [junit4]   2> 211222 INFO  
(recoveryExecutor-333-thread-1-processing-n:127.0.0.1:37479_m_w 
x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Registeri

[...truncated too long message...]

.1:37479_m_w-n_0000000003) [n:127.0.0.1:37479_m_w    ] o.a.s.c.Overseer 
Overseer Loop exiting : 127.0.0.1:37479_m_w
   [junit4]   2> 231726 WARN  
(zkCallback-335-thread-3-processing-n:127.0.0.1:37479_m_w) 
[n:127.0.0.1:37479_m_w    ] o.a.s.c.c.ZkStateReader ZooKeeper watch triggered, 
but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for /live_nodes]
   [junit4]   2> 231726 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.e.j.s.AbstractConnector Stopped 
ServerConnector@f9c99d{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
   [junit4]   2> 231727 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.e.j.s.h.ContextHandler Stopped 
o.e.j.s.ServletContextHandler@a6f516{/m_w,null,UNAVAILABLE}
   [junit4]   2> 231727 ERROR 
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper 
server won't take any action on ERROR or SHUTDOWN server state changes
   [junit4]   2> 231727 INFO  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] 
o.a.s.c.ZkTestServer connecting to 127.0.0.1:44917 44917
   [junit4]   2> 231728 WARN  (indexFetcher-1165-thread-1) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.h.IndexFetcher Could not log failed 
replication details
   [junit4]   2> org.apache.lucene.store.AlreadyClosedException: Already closed
   [junit4]   2>        at 
org.apache.solr.core.CachingDirectoryFactory.get(CachingDirectoryFactory.java:337)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.loadReplicationProperties(ReplicationHandler.java:1131)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.logReplicationTimeAndConfFiles(IndexFetcher.java:769)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.cleanup(IndexFetcher.java:677)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:659)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:332)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.doFetch(ReplicationHandler.java:419)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.lambda$setupPolling$12(ReplicationHandler.java:1183)
   [junit4]   2>        at 
java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
   [junit4]   2>        at 
java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
   [junit4]   2>        at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
   [junit4]   2>        at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
   [junit4]   2>        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
   [junit4]   2>        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]   2>        at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 231728 ERROR (indexFetcher-1165-thread-1) 
[n:127.0.0.1:37479_m_w c:collection1 s:shard2 r:core_node46 
x:collection1_shard2_replica_t45] o.a.s.h.ReplicationHandler Index fetch failed 
:org.apache.solr.common.SolrException: No registered leader was found after 
waiting for 4000ms , collection: collection1 slice: shard2
   [junit4]   2>        at 
org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:758)
   [junit4]   2>        at 
org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:744)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.getLeaderReplica(IndexFetcher.java:667)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:367)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:332)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.doFetch(ReplicationHandler.java:419)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.lambda$setupPolling$12(ReplicationHandler.java:1183)
   [junit4]   2>        at 
java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
   [junit4]   2>        at 
java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
   [junit4]   2>        at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
   [junit4]   2>        at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
   [junit4]   2>        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
   [junit4]   2>        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]   2>        at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 
   [junit4]   2> 231760 INFO  (Thread-537) [    ] o.a.s.c.ZkTestServer 
connecting to 127.0.0.1:44917 44917
   [junit4]   2> 231760 WARN  (Thread-537) [    ] o.a.s.c.ZkTestServer Watch 
limit violations: 
   [junit4]   2> Maximum concurrent create/delete watches above limit:
   [junit4]   2> 
   [junit4]   2>        6       /solr/aliases.json
   [junit4]   2>        4       /solr/security.json
   [junit4]   2>        4       /solr/configs/conf1
   [junit4]   2> 
   [junit4]   2> Maximum concurrent data watches above limit:
   [junit4]   2> 
   [junit4]   2>        6       /solr/clusterprops.json
   [junit4]   2>        6       /solr/clusterstate.json
   [junit4]   2>        3       /solr/collections/collection1/state.json
   [junit4]   2>        3       /solr/collections/collMinRf_1x3/state.json
   [junit4]   2>        2       /solr/collections/c8n_crud_1x2/state.json
   [junit4]   2> 
   [junit4]   2> Maximum concurrent children watches above limit:
   [junit4]   2> 
   [junit4]   2>        6       /solr/live_nodes
   [junit4]   2>        6       /solr/collections
   [junit4]   2>        4       /solr/overseer/queue
   [junit4]   2>        4       /solr/overseer/collection-queue-work
   [junit4]   2> 
   [junit4]   2> 231761 WARN  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.a.s.c.SocketProxy 
Closing 12 connections to: http://127.0.0.1:36769/m_w, target: 
http://127.0.0.1:35543/m_w
   [junit4]   2> 231761 WARN  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.a.s.c.SocketProxy 
Closing 3 connections to: http://127.0.0.1:37479/m_w, target: 
http://127.0.0.1:40925/m_w
   [junit4]   2> 231761 WARN  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.a.s.c.SocketProxy 
Closing 9 connections to: http://127.0.0.1:33981/m_w, target: 
http://127.0.0.1:36377/m_w
   [junit4]   2> 231761 WARN  
(TEST-HttpPartitionTest.test-seed#[D31F5E8D9AE0F05]) [    ] o.a.s.c.SocketProxy 
Closing 2 connections to: http://127.0.0.1:46785/m_w, target: 
http://127.0.0.1:45365/m_w
   [junit4]   2> NOTE: reproduce with: ant test  -Dtestcase=HttpPartitionTest 
-Dtests.method=test -Dtests.seed=D31F5E8D9AE0F05 -Dtests.multiplier=3 
-Dtests.slow=true -Dtests.locale=ko -Dtests.timezone=HST -Dtests.asserts=true 
-Dtests.file.encoding=US-ASCII
   [junit4] FAILURE 29.2s J2 | HttpPartitionTest.test <<<
   [junit4]    > Throwable #1: java.lang.AssertionError: Doc with id=1 not 
found in http://127.0.0.1:36769/m_w/collMinRf_1x3 due to: Path not found: /id; 
rsp={doc=null}
   [junit4]    >        at 
__randomizedtesting.SeedInfo.seed([D31F5E8D9AE0F05:8565CA32775262FD]:0)
   [junit4]    >        at 
org.apache.solr.cloud.HttpPartitionTest.assertDocExists(HttpPartitionTest.java:603)
   [junit4]    >        at 
org.apache.solr.cloud.HttpPartitionTest.assertDocsExistInAllReplicas(HttpPartitionTest.java:558)
   [junit4]    >        at 
org.apache.solr.cloud.HttpPartitionTest.testMinRf(HttpPartitionTest.java:249)
   [junit4]    >        at 
org.apache.solr.cloud.HttpPartitionTest.test(HttpPartitionTest.java:127)
   [junit4]    >        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993)
   [junit4]    >        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968)
   [junit4]    >        at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> NOTE: leaving temporary files on disk at: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.HttpPartitionTest_D31F5E8D9AE0F05-001
   [junit4]   2> Aug 21, 2017 6:54:43 PM 
com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
   [junit4]   2> WARNING: Will linger awaiting termination of 2 leaked 
thread(s).
   [junit4]   2> NOTE: test params are: codec=CheapBastard, 
sim=RandomSimilarity(queryNorm=true): {}, locale=ko, timezone=HST
   [junit4]   2> NOTE: Linux 4.10.0-27-generic i386/Oracle Corporation 
1.8.0_144 (32-bit)/cpus=8,threads=1,free=114041776,total=308019200
   [junit4]   2> NOTE: All tests run in this JVM: [TestHalfAndHalfDocValues, 
TestNumericTokenStream, IgnoreCommitOptimizeUpdateProcessorFactoryTest, 
TestSuggestSpellingConverter, TestGroupingSearch, TestDFRSimilarityFactory, 
TestTrackingShardHandlerFactory, OverseerCollectionConfigSetProcessorTest, 
TestChildDocTransformer, WrapperMergePolicyFactoryTest, PrimitiveFieldTypeTest, 
DistributedSuggestComponentTest, UniqFieldsUpdateProcessorFactoryTest, 
TestAnalyzedSuggestions, TestMultiValuedNumericRangeQuery, 
LeaderFailureAfterFreshStartTest, TestStressReorder, TestSurroundQueryParser, 
SimpleFacetsTest, GraphQueryTest, DirectUpdateHandlerTest, ResponseHeaderTest, 
TestQuerySenderListener, TestConfigSetProperties, 
AsyncCallRequestStatusResponseTest, TestFreeTextSuggestions, TestCodecSupport, 
StatsReloadRaceTest, TestSchemalessBufferedUpdates, AliasIntegrationTest, 
TestFoldingMultitermQuery, JSONWriterTest, TestManagedStopFilterFactory, 
HardAutoCommitTest, BlockDirectoryTest, DistributedExpandComponentTest, 
TestUnifiedSolrHighlighter, TestExactSharedStatsCache, MinimalSchemaTest, 
TestCustomStream, QueryEqualityTest, HdfsSyncSliceTest, 
CdcrReplicationDistributedZkTest, SignatureUpdateProcessorFactoryTest, 
DeleteStatusTest, TestTrieFacet, HttpPartitionTest]
   [junit4] Completed [81/730 (1!)] on J2 in 29.48s, 1 test, 1 failure <<< 
FAILURES!

[...truncated 41567 lines...]
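For context on the failure itself: the assertion reports that doc id=1 was not visible on the replica at http://127.0.0.1:36769/m_w/collMinRf_1x3 (the real-time get response contained doc=null, hence "Path not found: /id"). Below is only an illustrative SolrJ sketch of that kind of per-replica visibility check, not the actual HttpPartitionTest.assertDocExists implementation; the class name ReplicaDocCheck is hypothetical, and the URL and id are copied from the failure message above.

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.common.SolrDocument;

    // Hypothetical illustration only, not the HttpPartitionTest code.
    public class ReplicaDocCheck {
        public static void main(String[] args) throws Exception {
            // Replica core URL copied from the failure message above; adjust as needed.
            String replicaUrl = "http://127.0.0.1:36769/m_w/collMinRf_1x3";
            try (SolrClient client = new HttpSolrClient.Builder(replicaUrl).build()) {
                // Real-time get: returns null when this replica cannot serve the doc.
                SolrDocument doc = client.getById("1");
                if (doc == null || doc.getFieldValue("id") == null) {
                    throw new AssertionError("Doc with id=1 not found at " + replicaUrl);
                }
                System.out.println("Doc with id=1 is visible: " + doc);
            }
        }
    }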