Build: https://builds.apache.org/job/Lucene-Solr-Tests-6.x/735/
1 tests failed.
FAILED: org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test
Error Message:
Expected 2 of 3 replicas to be active but only found 1;
[core_node3:{"core":"c8n_1x3_lf_shard1_replica3","base_url":"http://127.0.0.1:33922","node_name":"127.0.0.1:33922_","state":"active","leader":"true"}];
clusterState: DocCollection(c8n_1x3_lf//clusterstate.json/37)={
"replicationFactor":"3", "shards":{"shard1":{
"range":"80000000-7fffffff", "state":"active", "replicas":{
"core_node1":{ "core":"c8n_1x3_lf_shard1_replica1",
"base_url":"http://127.0.0.1:51416", "node_name":"127.0.0.1:51416_",
"state":"down"}, "core_node2":{ "state":"down",
"base_url":"http://127.0.0.1:50562",
"core":"c8n_1x3_lf_shard1_replica2", "node_name":"127.0.0.1:50562_"},
"core_node3":{ "core":"c8n_1x3_lf_shard1_replica3",
"base_url":"http://127.0.0.1:33922", "node_name":"127.0.0.1:33922_",
"state":"active", "leader":"true"}}}},
"router":{"name":"compositeId"}, "maxShardsPerNode":"1",
"autoAddReplicas":"false"}
Stack Trace:
java.lang.AssertionError: Expected 2 of 3 replicas to be active but only found
1;
[core_node3:{"core":"c8n_1x3_lf_shard1_replica3","base_url":"http://127.0.0.1:33922","node_name":"127.0.0.1:33922_","state":"active","leader":"true"}];
clusterState: DocCollection(c8n_1x3_lf//clusterstate.json/37)={
"replicationFactor":"3",
"shards":{"shard1":{
"range":"80000000-7fffffff",
"state":"active",
"replicas":{
"core_node1":{
"core":"c8n_1x3_lf_shard1_replica1",
"base_url":"http://127.0.0.1:51416",
"node_name":"127.0.0.1:51416_",
"state":"down"},
"core_node2":{
"state":"down",
"base_url":"http://127.0.0.1:50562",
"core":"c8n_1x3_lf_shard1_replica2",
"node_name":"127.0.0.1:50562_"},
"core_node3":{
"core":"c8n_1x3_lf_shard1_replica3",
"base_url":"http://127.0.0.1:33922",
"node_name":"127.0.0.1:33922_",
"state":"active",
"leader":"true"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"false"}
at
__randomizedtesting.SeedInfo.seed([30587AEB416A1177:B80C4531EF967C8F]:0)
at org.junit.Assert.fail(Assert.java:93)
at org.junit.Assert.assertTrue(Assert.java:43)
at
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:168)
at
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:55)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957)
at
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:992)
at
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:967)
at
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:745)
Build Log:
[...truncated 11039 lines...]
[junit4] Suite: org.apache.solr.cloud.LeaderFailoverAfterPartitionTest
[junit4] 2> Creating dataDir:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/init-core-data-001
[junit4] 2> 0 INFO
(SUITE-LeaderFailoverAfterPartitionTest-seed#[30587AEB416A1177]-worker) [ ]
o.a.s.SolrTestCaseJ4 Using TrieFields
[junit4] 2> 63 INFO
(SUITE-LeaderFailoverAfterPartitionTest-seed#[30587AEB416A1177]-worker) [ ]
o.e.j.u.log Logging initialized @3936ms
[junit4] 2> 71 INFO
(SUITE-LeaderFailoverAfterPartitionTest-seed#[30587AEB416A1177]-worker) [ ]
o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via:
@org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776)
[junit4] 2> 81 INFO
(SUITE-LeaderFailoverAfterPartitionTest-seed#[30587AEB416A1177]-worker) [ ]
o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
[junit4] 2> 122 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 124 INFO (Thread-1) [ ] o.a.s.c.ZkTestServer client
port:0.0.0.0/0.0.0.0:0
[junit4] 2> 124 INFO (Thread-1) [ ] o.a.s.c.ZkTestServer Starting
server
[junit4] 2> 224 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.ZkTestServer start zk server on port:35156
[junit4] 2> 442 WARN (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [ ]
o.a.z.s.NIOServerCnxn caught end of stream exception
[junit4] 2> EndOfStreamException: Unable to read additional data from
client sessionid 0x15a5665bb020000, likely client has closed socket
[junit4] 2> at
org.apache.zookeeper.server.NIOServerCnxn.doIO(NIOServerCnxn.java:228)
[junit4] 2> at
org.apache.zookeeper.server.NIOServerCnxnFactory.run(NIOServerCnxnFactory.java:208)
[junit4] 2> at java.lang.Thread.run(Thread.java:745)
[junit4] 2> 480 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
to /configs/conf1/solrconfig.xml
[junit4] 2> 496 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/core/src/test-files/solr/collection1/conf/schema.xml
to /configs/conf1/schema.xml
[junit4] 2> 500 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
[junit4] 2> 504 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/core/src/test-files/solr/collection1/conf/stopwords.txt
to /configs/conf1/stopwords.txt
[junit4] 2> 508 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/core/src/test-files/solr/collection1/conf/protwords.txt
to /configs/conf1/protwords.txt
[junit4] 2> 514 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/core/src/test-files/solr/collection1/conf/currency.xml
to /configs/conf1/currency.xml
[junit4] 2> 517 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml
to /configs/conf1/enumsConfig.xml
[junit4] 2> 520 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json
to /configs/conf1/open-exchange-rates.json
[junit4] 2> 523 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt
to /configs/conf1/mapping-ISOLatin1Accent.txt
[junit4] 2> 525 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt
to /configs/conf1/old_synonyms.txt
[junit4] 2> 529 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
to /configs/conf1/synonyms.txt
[junit4] 2> 660 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.SolrTestCaseJ4 Writing core.properties file to
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/control-001/cores/collection1
[junit4] 2> 852 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.Server jetty-9.3.14.v20161028
[junit4] 2> 891 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@70f025ea{/,null,AVAILABLE}
[junit4] 2> 914 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@775e8ef2{HTTP/1.1,[http/1.1]}{127.0.0.1:34910}
[junit4] 2> 914 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.Server Started @4825ms
[junit4] 2> 915 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/tempDir-001/control/data,
hostContext=/, hostPort=51416,
coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/control-001/cores}
[junit4] 2> 954 ERROR
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 957 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
6.5.0
[junit4] 2> 961 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 961 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 974 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-02-19T12:43:12.784Z
[junit4] 2> 995 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.SolrResourceLoader solr home defaulted to 'solr/' (could not find
system property or JNDI)
[junit4] 2> 1008 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 1010 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.SolrXmlConfig Loading container configuration from
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/control-001/solr.xml
[junit4] 2> 2131 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params:
socketTimeout=340000&connTimeout=45000&retry=true
[junit4] 2> 2136 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35156/solr
[junit4] 2> 2549 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:51416_ ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 2557 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:51416_ ] o.a.s.c.OverseerElectionContext I am going to be the
leader 127.0.0.1:51416_
[junit4] 2> 2559 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:51416_ ] o.a.s.c.Overseer Overseer
(id=97485336871043076-127.0.0.1:51416_-n_0000000000) starting
[junit4] 2> 2709 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:51416_ ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:51416_
[junit4] 2> 2714 INFO
(zkCallback-7-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 3579 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:51416_ ] o.a.s.c.CorePropertiesLocator Found 1 core definitions
underneath
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/control-001/cores
[junit4] 2> 3584 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:51416_ ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
[junit4] 2> 3684 INFO
(OverseerStateUpdate-97485336871043076-127.0.0.1:51416_-n_0000000000)
[n:127.0.0.1:51416_ ] o.a.s.c.o.ReplicaMutator Assigning new node to shard
shard=shard1
[junit4] 2> 4800 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 6.5.0
[junit4] 2> 5574 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.s.IndexSchema [collection1] Schema
name=test
[junit4] 2> 6024 WARN
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.c.SolrResourceLoader Solr loaded a
deprecated plugin/analysis class [solr.WordDelimiterFilterFactory]. Please
consult documentation how to replace it accordingly.
[junit4] 2> 6271 WARN
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.c.SolrResourceLoader Solr loaded a
deprecated plugin/analysis class [solr.SynonymFilterFactory]. Please consult
documentation how to replace it accordingly.
[junit4] 2> 6695 WARN
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.s.IndexSchema [collection1] default
search field in schema is text. WARNING: Deprecated, please use 'df' on request
instead.
[junit4] 2> 6727 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.s.IndexSchema Loaded schema
test/1.0 with uniqueid field id
[junit4] 2> 7191 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.c.CoreContainer Creating SolrCore
'collection1' using configuration from collection control_collection
[junit4] 2> 7296 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening
new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/control-001/cores/collection1],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/control-001/cores/collection1/data/]
[junit4] 2> 7314 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is
enabled. Adding Solr mbeans to JMX Server:
com.sun.jmx.mbeanserver.JmxMBeanServer@364973c
[junit4] 2> 7563 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.u.RandomMergePolicy
RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy:
[TieredMergePolicy: maxMergeAtOnce=44, maxMergeAtOnceExplicit=19,
maxMergedSegmentMB=38.994140625, floorSegmentMB=0.6474609375,
forceMergeDeletesPctAllowed=20.107316332877765, segmentsPerTier=48.0,
maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.8521295305705854
[junit4] 2> 8140 WARN
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.c.RequestHandlers INVALID paramSet
a in requestHandler {type = requestHandler,name = /dump,class =
DumpRequestHandler,attributes = {initParams=a, name=/dump,
class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
[junit4] 2> 8912 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 8912 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog:
dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10
numVersionBuckets=65536
[junit4] 2> 8981 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 8986 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 8987 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.u.RandomMergePolicy
RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy:
[TieredMergePolicy: maxMergeAtOnce=30, maxMergeAtOnceExplicit=44,
maxMergedSegmentMB=97.09375, floorSegmentMB=0.673828125,
forceMergeDeletesPctAllowed=7.967384497022806, segmentsPerTier=36.0,
maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0
[junit4] 2> 9003 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.s.SolrIndexSearcher Opening
[Searcher@1ea7dd4f[collection1] main]
[junit4] 2> 9023 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.r.ManagedResourceStorage Configured
ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 9099 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 9118 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000
[junit4] 2> 9196 INFO
(searcherExecutor-9-thread-1-processing-n:127.0.0.1:51416_ x:collection1
c:control_collection) [n:127.0.0.1:51416_ c:control_collection x:collection1]
o.a.s.c.SolrCore [collection1] Registered new searcher
Searcher@1ea7dd4f[collection1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 9220 INFO
(coreLoadExecutor-8-thread-1-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_
c:control_collection x:collection1] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1559765399415816192
[junit4] 2> 9791 INFO
(coreZkRegister-1-thread-1-processing-n:127.0.0.1:51416_ x:collection1
c:control_collection) [n:127.0.0.1:51416_ c:control_collection s:shard1
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas
found to continue.
[junit4] 2> 9792 INFO
(coreZkRegister-1-thread-1-processing-n:127.0.0.1:51416_ x:collection1
c:control_collection) [n:127.0.0.1:51416_ c:control_collection s:shard1
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new
leader - try and sync
[junit4] 2> 9792 INFO
(coreZkRegister-1-thread-1-processing-n:127.0.0.1:51416_ x:collection1
c:control_collection) [n:127.0.0.1:51416_ c:control_collection s:shard1
r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to
http://127.0.0.1:51416/collection1/
[junit4] 2> 9796 INFO
(coreZkRegister-1-thread-1-processing-n:127.0.0.1:51416_ x:collection1
c:control_collection) [n:127.0.0.1:51416_ c:control_collection s:shard1
r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync
replicas to me
[junit4] 2> 9796 INFO
(coreZkRegister-1-thread-1-processing-n:127.0.0.1:51416_ x:collection1
c:control_collection) [n:127.0.0.1:51416_ c:control_collection s:shard1
r:core_node1 x:collection1] o.a.s.c.SyncStrategy
http://127.0.0.1:51416/collection1/ has no replicas
[junit4] 2> 9908 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 9918 INFO
(coreZkRegister-1-thread-1-processing-n:127.0.0.1:51416_ x:collection1
c:control_collection) [n:127.0.0.1:51416_ c:control_collection s:shard1
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new
leader: http://127.0.0.1:51416/collection1/ shard1
[junit4] 2> 9918 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:35156/solr ready
[junit4] 2> 9922 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection
loss:false
[junit4] 2> 10170 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.SolrTestCaseJ4 Writing core.properties file to
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-1-001/cores/collection1
[junit4] 2> 10171 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-1-001
[junit4] 2> 10298 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.Server jetty-9.3.14.v20161028
[junit4] 2> 10192 INFO
(coreZkRegister-1-thread-1-processing-n:127.0.0.1:51416_ x:collection1
c:control_collection) [n:127.0.0.1:51416_ c:control_collection s:shard1
r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no recovery
necessary
[junit4] 2> 10643 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@1c147180{/,null,AVAILABLE}
[junit4] 2> 10644 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@4569c222{HTTP/1.1,[http/1.1]}{127.0.0.1:50172}
[junit4] 2> 10644 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.Server Started @14555ms
[junit4] 2> 10644 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/tempDir-001/jetty1,
solrconfig=solrconfig.xml, hostContext=/, hostPort=57664,
coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-1-001/cores}
[junit4] 2> 10644 ERROR
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 10645 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
6.5.0
[junit4] 2> 10647 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 10647 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 10648 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-02-19T12:43:22.469Z
[junit4] 2> 10675 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 10675 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.SolrXmlConfig Loading container configuration from
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-1-001/solr.xml
[junit4] 2> 10692 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params:
socketTimeout=340000&connTimeout=45000&retry=true
[junit4] 2> 10700 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35156/solr
[junit4] 2> 10798 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:57664_ ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (1)
[junit4] 2> 10803 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:57664_ ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 10810 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:57664_ ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:57664_
[junit4] 2> 10815 INFO (zkCallback-11-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 10817 INFO
(zkCallback-16-thread-1-processing-n:127.0.0.1:57664_) [n:127.0.0.1:57664_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 10824 INFO
(zkCallback-7-thread-3-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 11137 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:57664_ ] o.a.s.c.CorePropertiesLocator Found 1 core definitions
underneath
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-1-001/cores
[junit4] 2> 11137 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:57664_ ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
[junit4] 2> 11166 INFO
(OverseerStateUpdate-97485336871043076-127.0.0.1:51416_-n_0000000000)
[n:127.0.0.1:51416_ ] o.a.s.c.o.ReplicaMutator Assigning new node to shard
shard=shard2
[junit4] 2> 12293 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.c.SolrConfig Using
Lucene MatchVersion: 6.5.0
[junit4] 2> 12363 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.s.IndexSchema
[collection1] Schema name=test
[junit4] 2> 12919 WARN
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.s.IndexSchema
[collection1] default search field in schema is text. WARNING: Deprecated,
please use 'df' on request instead.
[junit4] 2> 12940 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.s.IndexSchema Loaded
schema test/1.0 with uniqueid field id
[junit4] 2> 13123 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.c.CoreContainer
Creating SolrCore 'collection1' using configuration from collection collection1
[junit4] 2> 13124 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.c.SolrCore
[[collection1] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-1-001/cores/collection1],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-1-001/cores/collection1/data/]
[junit4] 2> 13124 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.c.JmxMonitoredMap JMX
monitoring is enabled. Adding Solr mbeans to JMX Server:
com.sun.jmx.mbeanserver.JmxMBeanServer@364973c
[junit4] 2> 13137 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.u.RandomMergePolicy
RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy:
[TieredMergePolicy: maxMergeAtOnce=44, maxMergeAtOnceExplicit=19,
maxMergedSegmentMB=38.994140625, floorSegmentMB=0.6474609375,
forceMergeDeletesPctAllowed=20.107316332877765, segmentsPerTier=48.0,
maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.8521295305705854
[junit4] 2> 13170 WARN
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.c.RequestHandlers
INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class
= DumpRequestHandler,attributes = {initParams=a, name=/dump,
class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
[junit4] 2> 13384 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.u.UpdateHandler Using
UpdateLog implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 13399 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.u.UpdateLog
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 13401 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.u.CommitTracker Hard
AutoCommit: disabled
[junit4] 2> 13403 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.u.CommitTracker Soft
AutoCommit: disabled
[junit4] 2> 13404 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.u.RandomMergePolicy
RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy:
[TieredMergePolicy: maxMergeAtOnce=30, maxMergeAtOnceExplicit=44,
maxMergedSegmentMB=97.09375, floorSegmentMB=0.673828125,
forceMergeDeletesPctAllowed=7.967384497022806, segmentsPerTier=36.0,
maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0
[junit4] 2> 13405 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.s.SolrIndexSearcher
Opening [Searcher@5793c8f6[collection1] main]
[junit4] 2> 13407 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 13408 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 13408 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.h.ReplicationHandler
Commits will be reserved for 10000
[junit4] 2> 13420 INFO
(searcherExecutor-20-thread-1-processing-n:127.0.0.1:57664_ x:collection1
c:collection1) [n:127.0.0.1:57664_ c:collection1 x:collection1]
o.a.s.c.SolrCore [collection1] Registered new searcher
Searcher@5793c8f6[collection1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 13421 INFO
(coreLoadExecutor-19-thread-1-processing-n:127.0.0.1:57664_)
[n:127.0.0.1:57664_ c:collection1 x:collection1] o.a.s.u.UpdateLog Could not
find max version in index or recent updates, using new clock 1559765403820883968
[junit4] 2> 13502 INFO
(coreZkRegister-14-thread-1-processing-n:127.0.0.1:57664_ x:collection1
c:collection1) [n:127.0.0.1:57664_ c:collection1 s:shard2 r:core_node1
x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to
continue.
[junit4] 2> 13502 INFO
(coreZkRegister-14-thread-1-processing-n:127.0.0.1:57664_ x:collection1
c:collection1) [n:127.0.0.1:57664_ c:collection1 s:shard2 r:core_node1
x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try
and sync
[junit4] 2> 13502 INFO
(coreZkRegister-14-thread-1-processing-n:127.0.0.1:57664_ x:collection1
c:collection1) [n:127.0.0.1:57664_ c:collection1 s:shard2 r:core_node1
x:collection1] o.a.s.c.SyncStrategy Sync replicas to
http://127.0.0.1:57664/collection1/
[junit4] 2> 13508 INFO
(coreZkRegister-14-thread-1-processing-n:127.0.0.1:57664_ x:collection1
c:collection1) [n:127.0.0.1:57664_ c:collection1 s:shard2 r:core_node1
x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 13508 INFO
(coreZkRegister-14-thread-1-processing-n:127.0.0.1:57664_ x:collection1
c:collection1) [n:127.0.0.1:57664_ c:collection1 s:shard2 r:core_node1
x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:57664/collection1/ has no
replicas
[junit4] 2> 13514 INFO
(coreZkRegister-14-thread-1-processing-n:127.0.0.1:57664_ x:collection1
c:collection1) [n:127.0.0.1:57664_ c:collection1 s:shard2 r:core_node1
x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader:
http://127.0.0.1:57664/collection1/ shard2
[junit4] 2> 13806 INFO
(coreZkRegister-14-thread-1-processing-n:127.0.0.1:57664_ x:collection1
c:collection1) [n:127.0.0.1:57664_ c:collection1 s:shard2 r:core_node1
x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 13938 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.SolrTestCaseJ4 Writing core.properties file to
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-2-001/cores/collection1
[junit4] 2> 13940 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-2-001
[junit4] 2> 13941 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.Server jetty-9.3.14.v20161028
[junit4] 2> 14032 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@79795a43{/,null,AVAILABLE}
[junit4] 2> 14033 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@11cf4360{HTTP/1.1,[http/1.1]}{127.0.0.1:42615}
[junit4] 2> 14033 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.Server Started @17944ms
[junit4] 2> 14033 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/tempDir-001/jetty2,
solrconfig=solrconfig.xml, hostContext=/, hostPort=50562,
coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-2-001/cores}
[junit4] 2> 14033 ERROR
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 14040 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
6.5.0
[junit4] 2> 14040 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 14040 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 14040 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-02-19T12:43:25.862Z
[junit4] 2> 14067 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 14067 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.SolrXmlConfig Loading container configuration from
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-2-001/solr.xml
[junit4] 2> 14088 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params:
socketTimeout=340000&connTimeout=45000&retry=true
[junit4] 2> 14089 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35156/solr
[junit4] 2> 14141 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:50562_ ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (2)
[junit4] 2> 14149 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:50562_ ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 14153 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:50562_ ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:50562_
[junit4] 2> 14161 INFO (zkCallback-11-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 14161 INFO
(zkCallback-16-thread-1-processing-n:127.0.0.1:57664_) [n:127.0.0.1:57664_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 14168 INFO
(zkCallback-7-thread-3-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 14174 INFO
(zkCallback-22-thread-1-processing-n:127.0.0.1:50562_) [n:127.0.0.1:50562_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 14668 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:50562_ ] o.a.s.c.CorePropertiesLocator Found 1 core definitions
underneath
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-2-001/cores
[junit4] 2> 14669 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:50562_ ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
[junit4] 2> 14698 INFO
(OverseerStateUpdate-97485336871043076-127.0.0.1:51416_-n_0000000000)
[n:127.0.0.1:51416_ ] o.a.s.c.o.ReplicaMutator Assigning new node to shard
shard=shard1
[junit4] 2> 15769 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.c.SolrConfig Using
Lucene MatchVersion: 6.5.0
[junit4] 2> 16056 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.s.IndexSchema
[collection1] Schema name=test
[junit4] 2> 16282 WARN
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.s.IndexSchema
[collection1] default search field in schema is text. WARNING: Deprecated,
please use 'df' on request instead.
[junit4] 2> 16285 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.s.IndexSchema Loaded
schema test/1.0 with uniqueid field id
[junit4] 2> 16343 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.c.CoreContainer
Creating SolrCore 'collection1' using configuration from collection collection1
[junit4] 2> 16343 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.c.SolrCore
[[collection1] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-2-001/cores/collection1],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-2-001/cores/collection1/data/]
[junit4] 2> 16343 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.c.JmxMonitoredMap JMX
monitoring is enabled. Adding Solr mbeans to JMX Server:
com.sun.jmx.mbeanserver.JmxMBeanServer@364973c
[junit4] 2> 16345 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.u.RandomMergePolicy
RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy:
[TieredMergePolicy: maxMergeAtOnce=44, maxMergeAtOnceExplicit=19,
maxMergedSegmentMB=38.994140625, floorSegmentMB=0.6474609375,
forceMergeDeletesPctAllowed=20.107316332877765, segmentsPerTier=48.0,
maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.8521295305705854
[junit4] 2> 16391 WARN
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.c.RequestHandlers
INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class
= DumpRequestHandler,attributes = {initParams=a, name=/dump,
class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
[junit4] 2> 16488 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.u.UpdateHandler Using
UpdateLog implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 16488 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.u.UpdateLog
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 16489 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.u.CommitTracker Hard
AutoCommit: disabled
[junit4] 2> 16489 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.u.CommitTracker Soft
AutoCommit: disabled
[junit4] 2> 16503 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.u.RandomMergePolicy
RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy:
[TieredMergePolicy: maxMergeAtOnce=30, maxMergeAtOnceExplicit=44,
maxMergedSegmentMB=97.09375, floorSegmentMB=0.673828125,
forceMergeDeletesPctAllowed=7.967384497022806, segmentsPerTier=36.0,
maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0
[junit4] 2> 16508 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.s.SolrIndexSearcher
Opening [Searcher@25e92499[collection1] main]
[junit4] 2> 16510 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 16510 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 16510 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.h.ReplicationHandler
Commits will be reserved for 10000
[junit4] 2> 16512 INFO
(coreLoadExecutor-30-thread-1-processing-n:127.0.0.1:50562_)
[n:127.0.0.1:50562_ c:collection1 x:collection1] o.a.s.u.UpdateLog Could not
find max version in index or recent updates, using new clock 1559765407062032384
[junit4] 2> 16538 INFO
(coreZkRegister-25-thread-1-processing-n:127.0.0.1:50562_ x:collection1
c:collection1) [n:127.0.0.1:50562_ c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to
continue.
[junit4] 2> 16538 INFO
(coreZkRegister-25-thread-1-processing-n:127.0.0.1:50562_ x:collection1
c:collection1) [n:127.0.0.1:50562_ c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try
and sync
[junit4] 2> 16538 INFO
(coreZkRegister-25-thread-1-processing-n:127.0.0.1:50562_ x:collection1
c:collection1) [n:127.0.0.1:50562_ c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.SyncStrategy Sync replicas to
http://127.0.0.1:50562/collection1/
[junit4] 2> 16538 INFO
(coreZkRegister-25-thread-1-processing-n:127.0.0.1:50562_ x:collection1
c:collection1) [n:127.0.0.1:50562_ c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 16538 INFO
(coreZkRegister-25-thread-1-processing-n:127.0.0.1:50562_ x:collection1
c:collection1) [n:127.0.0.1:50562_ c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:50562/collection1/ has no
replicas
[junit4] 2> 16539 INFO
(searcherExecutor-31-thread-1-processing-n:127.0.0.1:50562_ x:collection1
c:collection1) [n:127.0.0.1:50562_ c:collection1 x:collection1]
o.a.s.c.SolrCore [collection1] Registered new searcher
Searcher@25e92499[collection1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 16542 INFO
(coreZkRegister-25-thread-1-processing-n:127.0.0.1:50562_ x:collection1
c:collection1) [n:127.0.0.1:50562_ c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader:
http://127.0.0.1:50562/collection1/ shard1
[junit4] 2> 16693 INFO
(coreZkRegister-25-thread-1-processing-n:127.0.0.1:50562_ x:collection1
c:collection1) [n:127.0.0.1:50562_ c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 16789 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.SolrTestCaseJ4 Writing core.properties file to
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-3-001/cores/collection1
[junit4] 2> 16790 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-3-001
[junit4] 2> 16820 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.Server jetty-9.3.14.v20161028
[junit4] 2> 16858 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@64e809ff{/,null,AVAILABLE}
[junit4] 2> 16859 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@4c16c812{HTTP/1.1,[http/1.1]}{127.0.0.1:40894}
[junit4] 2> 16859 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.Server Started @20770ms
[junit4] 2> 16859 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/tempDir-001/jetty3,
solrconfig=solrconfig.xml, hostContext=/, hostPort=33922,
coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-3-001/cores}
[junit4] 2> 16859 ERROR
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 16951 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
6.5.0
[junit4] 2> 16951 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 16951 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 16959 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-02-19T12:43:28.781Z
[junit4] 2> 16993 WARN (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [ ]
o.a.z.s.NIOServerCnxn caught end of stream exception
[junit4] 2> EndOfStreamException: Unable to read additional data from
client sessionid 0x15a5665bb02000c, likely client has closed socket
[junit4] 2> at
org.apache.zookeeper.server.NIOServerCnxn.doIO(NIOServerCnxn.java:228)
[junit4] 2> at
org.apache.zookeeper.server.NIOServerCnxnFactory.run(NIOServerCnxnFactory.java:208)
[junit4] 2> at java.lang.Thread.run(Thread.java:745)
[junit4] 2> 16998 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 16998 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.SolrXmlConfig Loading container configuration from
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-3-001/solr.xml
[junit4] 2> 17016 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params:
socketTimeout=340000&connTimeout=45000&retry=true
[junit4] 2> 17017 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35156/solr
[junit4] 2> 17049 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:33922_ ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (3)
[junit4] 2> 17051 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:33922_ ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 17061 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:33922_ ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:33922_
[junit4] 2> 17067 INFO (zkCallback-11-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 17067 INFO
(zkCallback-16-thread-1-processing-n:127.0.0.1:57664_) [n:127.0.0.1:57664_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 17067 INFO
(zkCallback-7-thread-3-processing-n:127.0.0.1:51416_) [n:127.0.0.1:51416_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 17068 INFO
(zkCallback-22-thread-1-processing-n:127.0.0.1:50562_) [n:127.0.0.1:50562_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 17068 INFO
(zkCallback-28-thread-1-processing-n:127.0.0.1:33922_) [n:127.0.0.1:33922_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 17122 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:33922_ ] o.a.s.c.CorePropertiesLocator Found 1 core definitions
underneath
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-3-001/cores
[junit4] 2> 17122 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177])
[n:127.0.0.1:33922_ ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
[junit4] 2> 17226 INFO
(OverseerStateUpdate-97485336871043076-127.0.0.1:51416_-n_0000000000)
[n:127.0.0.1:51416_ ] o.a.s.c.o.ReplicaMutator Assigning new node to shard
shard=shard2
[junit4] 2> 18257 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.c.SolrConfig Using
Lucene MatchVersion: 6.5.0
[junit4] 2> 18329 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.s.IndexSchema
[collection1] Schema name=test
[junit4] 2> 18513 WARN
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.s.IndexSchema
[collection1] default search field in schema is text. WARNING: Deprecated,
please use 'df' on request instead.
[junit4] 2> 18528 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.s.IndexSchema Loaded
schema test/1.0 with uniqueid field id
[junit4] 2> 18588 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.c.CoreContainer
Creating SolrCore 'collection1' using configuration from collection collection1
[junit4] 2> 18589 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.c.SolrCore
[[collection1] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-3-001/cores/collection1],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-3-001/cores/collection1/data/]
[junit4] 2> 18589 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.c.JmxMonitoredMap JMX
monitoring is enabled. Adding Solr mbeans to JMX Server:
com.sun.jmx.mbeanserver.JmxMBeanServer@364973c
[junit4] 2> 18613 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.u.RandomMergePolicy
RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy:
[TieredMergePolicy: maxMergeAtOnce=44, maxMergeAtOnceExplicit=19,
maxMergedSegmentMB=38.994140625, floorSegmentMB=0.6474609375,
forceMergeDeletesPctAllowed=20.107316332877765, segmentsPerTier=48.0,
maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.8521295305705854
[junit4] 2> 18640 WARN
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.c.RequestHandlers
INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class
= DumpRequestHandler,attributes = {initParams=a, name=/dump,
class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
[junit4] 2> 18739 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.u.UpdateHandler Using
UpdateLog implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 18740 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.u.UpdateLog
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 18740 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.u.CommitTracker Hard
AutoCommit: disabled
[junit4] 2> 18740 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.u.CommitTracker Soft
AutoCommit: disabled
[junit4] 2> 18745 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.u.RandomMergePolicy
RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy:
[TieredMergePolicy: maxMergeAtOnce=30, maxMergeAtOnceExplicit=44,
maxMergedSegmentMB=97.09375, floorSegmentMB=0.673828125,
forceMergeDeletesPctAllowed=7.967384497022806, segmentsPerTier=36.0,
maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0
[junit4] 2> 18746 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.s.SolrIndexSearcher
Opening [Searcher@54088e44[collection1] main]
[junit4] 2> 18748 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 18748 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 18748 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.h.ReplicationHandler
Commits will be reserved for 10000
[junit4] 2> 18765 INFO
(searcherExecutor-42-thread-1-processing-n:127.0.0.1:33922_ x:collection1
c:collection1) [n:127.0.0.1:33922_ c:collection1 x:collection1]
o.a.s.c.SolrCore [collection1] Registered new searcher
Searcher@54088e44[collection1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 18766 INFO
(coreLoadExecutor-41-thread-1-processing-n:127.0.0.1:33922_)
[n:127.0.0.1:33922_ c:collection1 x:collection1] o.a.s.u.UpdateLog Could not
find max version in index or recent updates, using new clock 1559765409425522688
[junit4] 2> 18783 INFO
(coreZkRegister-36-thread-1-processing-n:127.0.0.1:33922_ x:collection1
c:collection1) [n:127.0.0.1:33922_ c:collection1 s:shard2 r:core_node3
x:collection1] o.a.s.c.ZkController Core needs to recover:collection1
[junit4] 2> 18800 INFO
(updateExecutor-25-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.u.DefaultSolrCoreState Running recovery
[junit4] 2> 18805 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Starting recovery process.
recoveringAfterStartup=true
[junit4] 2> 18805 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy ###### startupVersions=[[]]
[junit4] 2> 18806 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Begin buffering updates.
core=[collection1]
[junit4] 2> 18806 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.u.UpdateLog Starting to buffer updates.
FSUpdateLog{state=ACTIVE, tlog=null}
[junit4] 2> 18806 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Publishing state of core
[collection1] as recovering, leader is [http://127.0.0.1:57664/collection1/]
and I am [http://127.0.0.1:33922/collection1/]
[junit4] 2> 18840 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Sending prep recovery
command to [http://127.0.0.1:57664]; [WaitForState:
action=PREPRECOVERY&core=collection1&nodeName=127.0.0.1:33922_&coreNodeName=core_node3&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
[junit4] 2> 19244 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.SolrTestCaseJ4 ###Starting test
[junit4] 2> 19244 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase Wait for recoveries to finish - wait
30000 for each attempt
[junit4] 2> 19244 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection:
collection1 failOnTimeout:true timeout (sec):30000
[junit4] 2> 19284 INFO (SocketProxy-Acceptor-57664) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=40881,localport=57664], receiveBufferSize:531000
[junit4] 2> 19320 INFO (SocketProxy-Acceptor-57664) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=50172,localport=36449], receiveBufferSize=530904
[junit4] 2> 19496 INFO (qtp2144719958-64) [n:127.0.0.1:57664_ ]
o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node3, state:
recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true
[junit4] 2> 19497 INFO (qtp2144719958-64) [n:127.0.0.1:57664_ ]
o.a.s.h.a.PrepRecoveryOp Will wait a max of 183 seconds to see collection1
(shard2 of collection1) have state: recovering
[junit4] 2> 19497 INFO (qtp2144719958-64) [n:127.0.0.1:57664_ ]
o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1,
shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader?
true, live=true, checkLive=true, currentState=recovering, localState=active,
nodeName=127.0.0.1:33922_, coreNodeName=core_node3,
onlyIfActiveCheckResult=false, nodeProps:
core_node3:{"core":"collection1","base_url":"http://127.0.0.1:33922","node_name":"127.0.0.1:33922_","state":"recovering"}
[junit4] 2> 19497 INFO (qtp2144719958-64) [n:127.0.0.1:57664_ ]
o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node3, state: recovering,
checkLive: true, onlyIfLeader: true for: 0 seconds.
[junit4] 2> 19497 INFO (qtp2144719958-64) [n:127.0.0.1:57664_ ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores
params={nodeName=127.0.0.1:33922_&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node3&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
status=0 QTime=8
[junit4] 2> 26579 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Attempting to PeerSync
from [http://127.0.0.1:57664/collection1/] - recoveringAfterStartup=[true]
[junit4] 2> 26584 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.u.PeerSync PeerSync: core=collection1
url=http://127.0.0.1:33922 START replicas=[http://127.0.0.1:57664/collection1/]
nUpdates=100
[junit4] 2> 26591 INFO (SocketProxy-Acceptor-57664) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=40920,localport=57664], receiveBufferSize:531000
[junit4] 2> 26592 INFO (SocketProxy-Acceptor-57664) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=50172,localport=36488], receiveBufferSize=530904
[junit4] 2> 26602 INFO (qtp2144719958-62) [n:127.0.0.1:57664_
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.IndexFingerprint
IndexFingerprint millis:3.0 result:{maxVersionSpecified=9223372036854775807,
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0,
maxDoc=0}
[junit4] 2> 26602 INFO (qtp2144719958-62) [n:127.0.0.1:57664_
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request
[collection1] webapp= path=/get
params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2}
status=0 QTime=9
[junit4] 2> 26605 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint
millis:0.0 result:{maxVersionSpecified=9223372036854775807,
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0,
maxDoc=0}
[junit4] 2> 26605 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.u.PeerSync We are already in sync. No need to
do a PeerSync
[junit4] 2> 26607 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 start
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 26607 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted
changes. Skipping IW.commit.
[junit4] 2> 26608 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 26608 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy PeerSync stage of recovery
was successful.
[junit4] 2> 26608 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Replaying updates buffered
during PeerSync.
[junit4] 2> 26608 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy No replay needed.
[junit4] 2> 26608 INFO
(recoveryExecutor-26-thread-1-processing-n:127.0.0.1:33922_ x:collection1
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:33922_ c:collection1 s:shard2
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Registering as Active
after recovery.
[junit4] 2> 27245 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1
[junit4] 2> 27248 INFO (SocketProxy-Acceptor-51416) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=43183,localport=51416], receiveBufferSize:531000
[junit4] 2> 27252 INFO (SocketProxy-Acceptor-51416) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=34910,localport=57359], receiveBufferSize=530904
[junit4] 2> 27265 INFO (qtp1245658241-28) [n:127.0.0.1:51416_
c:control_collection s:shard1 r:core_node1 x:collection1]
o.a.s.u.DirectUpdateHandler2 start
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 27266 INFO (qtp1245658241-28) [n:127.0.0.1:51416_
c:control_collection s:shard1 r:core_node1 x:collection1]
o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
[junit4] 2> 27266 INFO (qtp1245658241-28) [n:127.0.0.1:51416_
c:control_collection s:shard1 r:core_node1 x:collection1]
o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 27266 INFO (qtp1245658241-28) [n:127.0.0.1:51416_
c:control_collection s:shard1 r:core_node1 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp= path=/update
params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
0 12
[junit4] 2> 27274 INFO (SocketProxy-Acceptor-50562) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=35347,localport=50562], receiveBufferSize:531000
[junit4] 2> 27277 INFO (SocketProxy-Acceptor-50562) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=42615,localport=58807], receiveBufferSize=530904
[junit4] 2> 27285 INFO (SocketProxy-Acceptor-33922) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=33395,localport=33922], receiveBufferSize:531000
[junit4] 2> 27286 INFO (SocketProxy-Acceptor-33922) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=40894,localport=58484], receiveBufferSize=530904
[junit4] 2> 27288 INFO (SocketProxy-Acceptor-50562) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=35351,localport=50562], receiveBufferSize:531000
[junit4] 2> 27293 INFO (SocketProxy-Acceptor-50562) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=42615,localport=58811], receiveBufferSize=530904
[junit4] 2> 27294 INFO (qtp1849265150-92) [n:127.0.0.1:50562_
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2
start
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 27294 INFO (qtp1916420853-123) [n:127.0.0.1:33922_
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2
start
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 27296 INFO (qtp1849265150-92) [n:127.0.0.1:50562_
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2
No uncommitted changes. Skipping IW.commit.
[junit4] 2> 27301 INFO (SocketProxy-Acceptor-57664) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=40930,localport=57664], receiveBufferSize:531000
[junit4] 2> 27300 INFO (qtp1916420853-123) [n:127.0.0.1:33922_
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2
No uncommitted changes. Skipping IW.commit.
[junit4] 2> 27301 INFO (qtp1916420853-123) [n:127.0.0.1:33922_
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2
end_commit_flush
[junit4] 2> 27301 INFO (qtp1916420853-123) [n:127.0.0.1:33922_
c:collection1 s:shard2 r:core_node3 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp= path=/update
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:50562/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
0 8
[junit4] 2> 27302 INFO (SocketProxy-Acceptor-57664) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=50172,localport=36498], receiveBufferSize=530904
[junit4] 2> 27302 INFO (qtp1849265150-92) [n:127.0.0.1:50562_
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2
end_commit_flush
[junit4] 2> 27302 INFO (qtp1849265150-92) [n:127.0.0.1:50562_
c:collection1 s:shard1 r:core_node2 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp= path=/update
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:50562/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
0 8
[junit4] 2> 27306 INFO (qtp2144719958-65) [n:127.0.0.1:57664_
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2
start
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 27306 INFO (qtp2144719958-65) [n:127.0.0.1:57664_
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2
No uncommitted changes. Skipping IW.commit.
[junit4] 2> 27308 INFO (qtp2144719958-65) [n:127.0.0.1:57664_
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2
end_commit_flush
[junit4] 2> 27312 INFO (qtp2144719958-65) [n:127.0.0.1:57664_
c:collection1 s:shard2 r:core_node1 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp= path=/update
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:50562/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
0 6
[junit4] 2> 27313 INFO (qtp1849265150-95) [n:127.0.0.1:50562_
c:collection1 s:shard1 r:core_node2 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp= path=/update
params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
0 35
[junit4] 2> 27318 INFO (SocketProxy-Acceptor-57664) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=40932,localport=57664], receiveBufferSize:531000
[junit4] 2> 27319 INFO (SocketProxy-Acceptor-57664) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=50172,localport=36500], receiveBufferSize=530904
[junit4] 2> 27366 INFO (qtp2144719958-68) [n:127.0.0.1:57664_
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request
[collection1] webapp= path=/select
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
hits=0 status=0 QTime=44
[junit4] 2> 27370 INFO (SocketProxy-Acceptor-33922) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=33403,localport=33922], receiveBufferSize:531000
[junit4] 2> 27371 INFO (SocketProxy-Acceptor-33922) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=40894,localport=58492], receiveBufferSize=530904
[junit4] 2> 27373 INFO (qtp1916420853-121) [n:127.0.0.1:33922_
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.S.Request
[collection1] webapp= path=/select
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
hits=0 status=0 QTime=1
[junit4] 2> 27375 INFO (SocketProxy-Acceptor-50562) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=35359,localport=50562], receiveBufferSize:531000
[junit4] 2> 27375 INFO (SocketProxy-Acceptor-50562) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=42615,localport=58819], receiveBufferSize=530904
[junit4] 2> 27378 INFO (qtp1849265150-94) [n:127.0.0.1:50562_
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.S.Request
[collection1] webapp= path=/select
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
hits=0 status=0 QTime=0
[junit4] 2> 29381 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase Creating collection with stateFormat=1:
c8n_1x3_lf
[junit4] 2> 29383 INFO (SocketProxy-Acceptor-50562) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=35375,localport=50562], receiveBufferSize:531000
[junit4] 2> 29412 INFO (SocketProxy-Acceptor-50562) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=42615,localport=58835], receiveBufferSize=530904
[junit4] 2> 29413 INFO (qtp1849265150-97) [n:127.0.0.1:50562_ ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params
replicationFactor=3&maxShardsPerNode=1&name=c8n_1x3_lf&action=CREATE&numShards=1&stateFormat=1&wt=javabin&version=2
and sendToOCPQueue=true
[junit4] 2> 29439 INFO
(OverseerThreadFactory-6-thread-1-processing-n:127.0.0.1:51416_)
[n:127.0.0.1:51416_ ] o.a.s.c.CreateCollectionCmd Create collection
c8n_1x3_lf
[junit4] 2> 29447 INFO
(OverseerThreadFactory-6-thread-1-processing-n:127.0.0.1:51416_)
[n:127.0.0.1:51416_ ] o.a.s.c.CreateCollectionCmd Only one config set found
in zk - using it:conf1
[junit4] 2> 29581 INFO (SocketProxy-Acceptor-51416) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=43216,localport=51416], receiveBufferSize:531000
[junit4] 2> 29586 INFO (SocketProxy-Acceptor-51416) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=34910,localport=57392], receiveBufferSize=530904
[junit4] 2> 29586 INFO (SocketProxy-Acceptor-50562) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=35380,localport=50562], receiveBufferSize:531000
[junit4] 2> 29588 INFO (SocketProxy-Acceptor-33922) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=33428,localport=33922], receiveBufferSize:531000
[junit4] 2> 29595 INFO (SocketProxy-Acceptor-50562) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=42615,localport=58840], receiveBufferSize=530904
[junit4] 2> 29595 INFO (qtp1245658241-29) [n:127.0.0.1:51416_ ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&name=c8n_1x3_lf_shard1_replica1&action=CREATE&numShards=1&collection=c8n_1x3_lf&shard=shard1&wt=javabin&version=2
[junit4] 2> 29597 INFO (SocketProxy-Acceptor-33922) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=40894,localport=58517], receiveBufferSize=530904
[junit4] 2> 29614 INFO (qtp1849265150-96) [n:127.0.0.1:50562_ ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&name=c8n_1x3_lf_shard1_replica2&action=CREATE&numShards=1&collection=c8n_1x3_lf&shard=shard1&wt=javabin&version=2
[junit4] 2> 29621 INFO (qtp1916420853-126) [n:127.0.0.1:33922_ ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&name=c8n_1x3_lf_shard1_replica3&action=CREATE&numShards=1&collection=c8n_1x3_lf&shard=shard1&wt=javabin&version=2
[junit4] 2> 30659 INFO (qtp1245658241-29) [n:127.0.0.1:51416_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica1] o.a.s.c.SolrConfig Using
Lucene MatchVersion: 6.5.0
[junit4] 2> 30670 INFO (qtp1849265150-96) [n:127.0.0.1:50562_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica2] o.a.s.c.SolrConfig Using
Lucene MatchVersion: 6.5.0
[junit4] 2> 30676 INFO (qtp1916420853-126) [n:127.0.0.1:33922_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica3] o.a.s.c.SolrConfig Using
Lucene MatchVersion: 6.5.0
[junit4] 2> 30709 INFO (qtp1245658241-29) [n:127.0.0.1:51416_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica1] o.a.s.s.IndexSchema
[c8n_1x3_lf_shard1_replica1] Schema name=test
[junit4] 2> 30733 INFO (qtp1849265150-96) [n:127.0.0.1:50562_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica2] o.a.s.s.IndexSchema
[c8n_1x3_lf_shard1_replica2] Schema name=test
[junit4] 2> 30764 INFO (qtp1916420853-126) [n:127.0.0.1:33922_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica3] o.a.s.s.IndexSchema
[c8n_1x3_lf_shard1_replica3] Schema name=test
[junit4] 2> 31056 WARN (qtp1245658241-29) [n:127.0.0.1:51416_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica1] o.a.s.s.IndexSchema
[c8n_1x3_lf_shard1_replica1] default search field in schema is text. WARNING:
Deprecated, please use 'df' on request instead.
[junit4] 2> 31063 WARN (qtp1849265150-96) [n:127.0.0.1:50562_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica2] o.a.s.s.IndexSchema
[c8n_1x3_lf_shard1_replica2] default search field in schema is text. WARNING:
Deprecated, please use 'df' on request instead.
[junit4] 2> 31085 INFO (qtp1849265150-96) [n:127.0.0.1:50562_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica2] o.a.s.s.IndexSchema Loaded
schema test/1.0 with uniqueid field id
[junit4] 2> 31091 INFO (qtp1245658241-29) [n:127.0.0.1:51416_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica1] o.a.s.s.IndexSchema Loaded
schema test/1.0 with uniqueid field id
[junit4] 2> 31111 INFO (qtp1849265150-96) [n:127.0.0.1:50562_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica2] o.a.s.c.CoreContainer
Creating SolrCore 'c8n_1x3_lf_shard1_replica2' using configuration from
collection c8n_1x3_lf
[junit4] 2> 31121 INFO (qtp1849265150-96) [n:127.0.0.1:50562_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica2] o.a.s.c.SolrCore
[[c8n_1x3_lf_shard1_replica2] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-2-001/cores/c8n_1x3_lf_shard1_replica2],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/shard-2-001/cores/c8n_1x3_lf_shard1_replica2/data/]
[junit4] 2> 31122 INFO (qtp1849265150-96) [n:127.0.0.1:50562_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica2] o.a.s.c.JmxMonitoredMap
JMX monitoring is enabled. Adding Solr mbeans to JMX Server:
com.sun.jmx.mbeanserver.JmxMBeanServer@364973c
[junit4] 2> 31128 INFO (qtp1849265150-96) [n:127.0.0.1:50562_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica2] o.a.s.u.RandomMergePolicy
RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy:
[TieredMergePolicy: maxMergeAtOnce=44, maxMergeAtOnceExplicit=19,
maxMergedSegmentMB=38.994140625, floorSegmentMB=0.6474609375,
forceMergeDeletesPctAllowed=20.107316332877765, segmentsPerTier=48.0,
maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.8521295305705854
[junit4] 2> 31141 INFO (qtp1245658241-29) [n:127.0.0.1:51416_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica1] o.a.s.c.CoreContainer
Creating SolrCore 'c8n_1x3_lf_shard1_replica1' using configuration from
collection c8n_1x3_lf
[junit4] 2> 31141 INFO (qtp1245658241-29) [n:127.0.0.1:51416_
c:c8n_1x3_lf s:shard1 x:c8n_1x3_lf_shard1_replica1] o.a.s.c.SolrCore
[[c8n_1x3_lf_shard1_replica1] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/control-001/cores/c8n_1x3_lf_shard1_replica1],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001/control-001/cores/c8n_1x3_lf_shard1_replica1/data/]
[jun
[...truncated too long message...]
4] 2> at
org.apache.solr.client.solrj.embedded.JettySolrRunner$DebugFilter.doFilter(JettySolrRunner.java:136)
[junit4] 2> at
org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1699)
[junit4] 2> at
org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:582)
[junit4] 2> at
org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:224)
[junit4] 2> at
org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1180)
[junit4] 2> at
org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:512)
[junit4] 2> at
org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:185)
[junit4] 2> at
org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1112)
[junit4] 2> at
org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
[junit4] 2> at
org.eclipse.jetty.server.handler.gzip.GzipHandler.handle(GzipHandler.java:462)
[junit4] 2> at
org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:134)
[junit4] 2> at
org.eclipse.jetty.server.Server.handle(Server.java:534)
[junit4] 2> at
org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:320)
[junit4] 2> at
org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:251)
[junit4] 2> at
org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:273)
[junit4] 2> at
org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:95)
[junit4] 2> at
org.eclipse.jetty.io.SelectChannelEndPoint$2.run(SelectChannelEndPoint.java:93)
[junit4] 2> at
org.eclipse.jetty.util.thread.strategy.ExecuteProduceConsume.executeProduceConsume(ExecuteProduceConsume.java:303)
[junit4] 2> at
org.eclipse.jetty.util.thread.strategy.ExecuteProduceConsume.produceConsume(ExecuteProduceConsume.java:148)
[junit4] 2> at
org.eclipse.jetty.util.thread.strategy.ExecuteProduceConsume.run(ExecuteProduceConsume.java:136)
[junit4] 2> at
org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:671)
[junit4] 2> at
org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:589)
[junit4] 2> at java.lang.Thread.run(Thread.java:745)
[junit4] 2>
[junit4] 2> 156001 INFO (qtp1916420853-325) [n:127.0.0.1:33922_ ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores
params={nodeName=127.0.0.1:50562_&onlyIfLeaderActive=true&core=c8n_1x3_lf_shard1_replica3&coreNodeName=core_node2&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
status=400 QTime=16031
[junit4] 2> 157433 WARN
(zkCallback-28-thread-4-processing-n:127.0.0.1:33922_) [n:127.0.0.1:33922_
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.SyncStrategy Closed,
skipping sync up.
[junit4] 2> 157433 INFO
(zkCallback-28-thread-4-processing-n:127.0.0.1:33922_) [n:127.0.0.1:33922_
c:collection1 s:shard2 r:core_node3 x:collection1]
o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we
can't sync in that case - we were active before, so become leader anyway
[junit4] 2> 157433 INFO
(zkCallback-28-thread-4-processing-n:127.0.0.1:33922_) [n:127.0.0.1:33922_
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.SolrCore
[collection1] CLOSING SolrCore org.apache.solr.core.SolrCore@25e2568f
[junit4] 2> 157448 INFO
(zkCallback-28-thread-4-processing-n:127.0.0.1:33922_) [n:127.0.0.1:33922_
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.m.SolrMetricManager
Closing metric reporters for: solr.core.collection1
[junit4] 2> 157448 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.m.SolrMetricManager Closing metric reporters for: solr.node
[junit4] 2> 157449 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.AbstractConnector Stopped
ServerConnector@4c16c812{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
[junit4] 2> 157449 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.e.j.s.h.ContextHandler Stopped
o.e.j.s.ServletContextHandler@64e809ff{/,null,UNAVAILABLE}
[junit4] 2> 157453 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.ZkTestServer connecting to 127.0.0.1:35156 35156
[junit4] 2> 157553 INFO (Thread-1) [ ] o.a.s.c.ZkTestServer connecting
to 127.0.0.1:35156 35156
[junit4] 2> 157555 WARN (Thread-1) [ ] o.a.s.c.ZkTestServer Watch
limit violations:
[junit4] 2> Maximum concurrent create/delete watches above limit:
[junit4] 2>
[junit4] 2> 5 /solr/aliases.json
[junit4] 2> 5 /solr/clusterprops.json
[junit4] 2> 4 /solr/security.json
[junit4] 2> 4 /solr/configs/conf1
[junit4] 2> 3 /solr/collections/c8n_1x3_lf/state.json
[junit4] 2> 3 /solr/collections/collection1/state.json
[junit4] 2>
[junit4] 2> Maximum concurrent data watches above limit:
[junit4] 2>
[junit4] 2> 5 /solr/clusterstate.json
[junit4] 2>
[junit4] 2> Maximum concurrent children watches above limit:
[junit4] 2>
[junit4] 2> 150 /solr/overseer/collection-queue-work
[junit4] 2> 53 /solr/overseer/queue
[junit4] 2> 15 /solr/overseer/queue-work
[junit4] 2> 5 /solr/live_nodes
[junit4] 2> 5 /solr/collections
[junit4] 2>
[junit4] 2> 157555 WARN
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.SocketProxy Closing 4 connections to: http://127.0.0.1:57664/, target:
http://127.0.0.1:50172/
[junit4] 2> 157555 WARN
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.SocketProxy Closing 0 connections to: http://127.0.0.1:51416/, target:
http://127.0.0.1:34910/
[junit4] 2> 157555 WARN
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.SocketProxy Closing 1 connections to: http://127.0.0.1:50562/, target:
http://127.0.0.1:42615/
[junit4] 2> 157555 WARN
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[30587AEB416A1177]) [ ]
o.a.s.c.SocketProxy Closing 16 connections to: http://127.0.0.1:33922/, target:
http://127.0.0.1:40894/
[junit4] 2> NOTE: reproduce with: ant test
-Dtestcase=LeaderFailoverAfterPartitionTest -Dtests.method=test
-Dtests.seed=30587AEB416A1177 -Dtests.multiplier=2 -Dtests.slow=true
-Dtests.locale=sr -Dtests.timezone=Asia/Katmandu -Dtests.asserts=true
-Dtests.file.encoding=ISO-8859-1
[junit4] FAILURE 157s J0 | LeaderFailoverAfterPartitionTest.test <<<
[junit4] > Throwable #1: java.lang.AssertionError: Expected 2 of 3
replicas to be active but only found 1;
[core_node3:{"core":"c8n_1x3_lf_shard1_replica3","base_url":"http://127.0.0.1:33922","node_name":"127.0.0.1:33922_","state":"active","leader":"true"}];
clusterState: DocCollection(c8n_1x3_lf//clusterstate.json/37)={
[junit4] > "replicationFactor":"3",
[junit4] > "shards":{"shard1":{
[junit4] > "range":"80000000-7fffffff",
[junit4] > "state":"active",
[junit4] > "replicas":{
[junit4] > "core_node1":{
[junit4] > "core":"c8n_1x3_lf_shard1_replica1",
[junit4] > "base_url":"http://127.0.0.1:51416",
[junit4] > "node_name":"127.0.0.1:51416_",
[junit4] > "state":"down"},
[junit4] > "core_node2":{
[junit4] > "state":"down",
[junit4] > "base_url":"http://127.0.0.1:50562",
[junit4] > "core":"c8n_1x3_lf_shard1_replica2",
[junit4] > "node_name":"127.0.0.1:50562_"},
[junit4] > "core_node3":{
[junit4] > "core":"c8n_1x3_lf_shard1_replica3",
[junit4] > "base_url":"http://127.0.0.1:33922",
[junit4] > "node_name":"127.0.0.1:33922_",
[junit4] > "state":"active",
[junit4] > "leader":"true"}}}},
[junit4] > "router":{"name":"compositeId"},
[junit4] > "maxShardsPerNode":"1",
[junit4] > "autoAddReplicas":"false"}
[junit4] > at
__randomizedtesting.SeedInfo.seed([30587AEB416A1177:B80C4531EF967C8F]:0)
[junit4] > at
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:168)
[junit4] > at
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:55)
[junit4] > at
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:992)
[junit4] > at
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:967)
[junit4] > at java.lang.Thread.run(Thread.java:745)
[junit4] 2> 157562 INFO
(SUITE-LeaderFailoverAfterPartitionTest-seed#[30587AEB416A1177]-worker) [ ]
o.a.s.SolrTestCaseJ4 ###deleteCore
[junit4] 2> NOTE: leaving temporary files on disk at:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-6.x/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_30587AEB416A1177-001
[junit4] 2> Feb 19, 2017 12:45:49 PM
com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
[junit4] 2> WARNING: Will linger awaiting termination of 1 leaked
thread(s).
[junit4] 2> NOTE: test params are: codec=Lucene62,
sim=RandomSimilarity(queryNorm=true,coord=crazy): {}, locale=sr,
timezone=Asia/Katmandu
[junit4] 2> NOTE: Linux 3.13.0-85-generic amd64/Oracle Corporation
1.8.0_121 (64-bit)/cpus=4,threads=1,free=151887896,total=289931264
[junit4] 2> NOTE: All tests run in this JVM:
[LeaderFailoverAfterPartitionTest]
[junit4] Completed [40/696 (1!)] on J0 in 160.91s, 1 test, 1 failure <<<
FAILURES!
[...truncated 64777 lines...]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]